code (string, 75-104k chars) | docstring (string, 1-46.9k chars) | text (string, 164-112k chars)
---|---|---|
def new_game(self):
"""Creates a new game of 2048."""
self.game = self.game_class(self, self.screen)
    self.save() | Creates a new game of 2048. | Below is the instruction that describes the task:
### Input:
Creates a new game of 2048.
### Response:
def new_game(self):
"""Creates a new game of 2048."""
self.game = self.game_class(self, self.screen)
self.save() |
def asArcPyObject(self):
""" returns the Point as an ESRI arcpy.Point object """
if arcpyFound == False:
raise Exception("ArcPy is required to use this function")
    return arcpy.AsShape(self.asDictionary, True) | returns the Point as an ESRI arcpy.Point object | Below is the instruction that describes the task:
### Input:
returns the Point as an ESRI arcpy.Point object
### Response:
def asArcPyObject(self):
""" returns the Point as an ESRI arcpy.Point object """
if arcpyFound == False:
raise Exception("ArcPy is required to use this function")
return arcpy.AsShape(self.asDictionary, True) |
def get_by_id(self, id_networkv6):
"""Get IPv6 network
:param id_networkv4: ID for NetworkIPv6
:return: IPv6 Network
"""
uri = 'api/networkv4/%s/' % id_networkv6
return super(ApiNetworkIPv6, self).get(uri) | Get IPv6 network
:param id_networkv4: ID for NetworkIPv6
:return: IPv6 Network | Below is the instruction that describes the task:
### Input:
Get IPv6 network
:param id_networkv4: ID for NetworkIPv6
:return: IPv6 Network
### Response:
def get_by_id(self, id_networkv6):
"""Get IPv6 network
:param id_networkv4: ID for NetworkIPv6
:return: IPv6 Network
"""
uri = 'api/networkv4/%s/' % id_networkv6
return super(ApiNetworkIPv6, self).get(uri) |
def get_corresponding_lineno(self, lineno):
"""Return the source line number of a line number in the
generated bytecode as they are not in sync.
"""
for template_line, code_line in reversed(self.debug_info):
if code_line <= lineno:
return template_line
return 1 | Return the source line number of a line number in the
generated bytecode as they are not in sync. | Below is the instruction that describes the task:
### Input:
Return the source line number of a line number in the
generated bytecode as they are not in sync.
### Response:
def get_corresponding_lineno(self, lineno):
"""Return the source line number of a line number in the
generated bytecode as they are not in sync.
"""
for template_line, code_line in reversed(self.debug_info):
if code_line <= lineno:
return template_line
return 1 |
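
The lookup above walks the (template_line, code_line) pairs in debug_info from the end and returns the template line of the last generated line at or before the requested one. A minimal standalone sketch with an assumed debug_info list (the real attribute lives on the compiled template object and is not shown here):

debug_info = [(1, 5), (3, 9), (7, 14)]   # assumed (template_line, code_line) pairs

def corresponding_lineno(lineno, debug_info):
    # Last generated line at or before `lineno` wins; fall back to template line 1.
    for template_line, code_line in reversed(debug_info):
        if code_line <= lineno:
            return template_line
    return 1

print(corresponding_lineno(10, debug_info))  # 3: generated line 10 falls in the block starting at 9
print(corresponding_lineno(4, debug_info))   # 1: before the first mapped generated line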
def rover_yaw_rate(VFR_HUD, SERVO_OUTPUT_RAW):
'''return yaw rate in degrees/second given steering_angle and speed'''
max_wheel_turn=35
speed = VFR_HUD.groundspeed
# assume 1100 to 1900 PWM on steering
steering_angle = max_wheel_turn * (SERVO_OUTPUT_RAW.servo1_raw - 1500) / 400.0
if abs(steering_angle) < 1.0e-6 or abs(speed) < 1.0e-6:
return 0
d = rover_turn_circle(SERVO_OUTPUT_RAW)
c = pi * d
t = c / speed
rate = 360.0 / t
    return rate | return yaw rate in degrees/second given steering_angle and speed | Below is the instruction that describes the task:
### Input:
return yaw rate in degrees/second given steering_angle and speed
### Response:
def rover_yaw_rate(VFR_HUD, SERVO_OUTPUT_RAW):
'''return yaw rate in degrees/second given steering_angle and speed'''
max_wheel_turn=35
speed = VFR_HUD.groundspeed
# assume 1100 to 1900 PWM on steering
steering_angle = max_wheel_turn * (SERVO_OUTPUT_RAW.servo1_raw - 1500) / 400.0
if abs(steering_angle) < 1.0e-6 or abs(speed) < 1.0e-6:
return 0
d = rover_turn_circle(SERVO_OUTPUT_RAW)
c = pi * d
t = c / speed
rate = 360.0 / t
return rate |
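
rover_turn_circle is not shown above, so the following is only a worked sketch of the arithmetic under an assumed turn-circle diameter: the rover drives the circle's circumference at its groundspeed, and the yaw rate is a full 360 degrees divided by the time that lap takes.

from math import pi

servo1_raw = 1700                 # hypothetical PWM value
max_wheel_turn = 35
steering_angle = max_wheel_turn * (servo1_raw - 1500) / 400.0   # 17.5 degrees

d = 10.0                          # assumed turn-circle diameter in metres
speed = 5.0                       # groundspeed in m/s
t = pi * d / speed                # time for one full circle: ~6.28 s
rate = 360.0 / t                  # ~57.3 degrees/second
print(steering_angle, round(rate, 1))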
def load(self, filename=None, ignore_discard=False, ignore_expires=False):
"""Load cookies from a file."""
if filename is None:
if self.filename is not None: filename = self.filename
else: raise ValueError(MISSING_FILENAME_TEXT)
f = open(filename)
try:
self._really_load(f, filename, ignore_discard, ignore_expires)
finally:
    f.close() | Load cookies from a file. | Below is the instruction that describes the task:
### Input:
Load cookies from a file.
### Response:
def load(self, filename=None, ignore_discard=False, ignore_expires=False):
"""Load cookies from a file."""
if filename is None:
if self.filename is not None: filename = self.filename
else: raise ValueError(MISSING_FILENAME_TEXT)
f = open(filename)
try:
self._really_load(f, filename, ignore_discard, ignore_expires)
finally:
f.close() |
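
The method follows the standard-library FileCookieJar.load pattern. A minimal usage sketch with Python 3's http.cookiejar, assuming a Netscape-format cookies.txt exists on disk:

from http.cookiejar import MozillaCookieJar

jar = MozillaCookieJar("cookies.txt")            # assumed path to a Netscape-format cookie file
jar.load(ignore_discard=True, ignore_expires=True)
for cookie in jar:
    print(cookie.domain, cookie.name)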
def _indentLines(str, indentLevels = 1, indentFirstLine=True):
""" Indent all lines in the given string
str: input string
indentLevels: number of levels of indentation to apply
indentFirstLine: if False, the 1st line will not be indented
Returns: The result string with all lines indented
"""
indent = _ONE_INDENT * indentLevels
lines = str.splitlines(True)
result = ''
if len(lines) > 0 and not indentFirstLine:
first = 1
result += lines[0]
else:
first = 0
for line in lines[first:]:
result += indent + line
return result | Indent all lines in the given string
str: input string
indentLevels: number of levels of indentation to apply
indentFirstLine: if False, the 1st line will not be indented
Returns: The result string with all lines indented | Below is the instruction that describes the task:
### Input:
Indent all lines in the given string
str: input string
indentLevels: number of levels of indentation to apply
indentFirstLine: if False, the 1st line will not be indented
Returns: The result string with all lines indented
### Response:
def _indentLines(str, indentLevels = 1, indentFirstLine=True):
""" Indent all lines in the given string
str: input string
indentLevels: number of levels of indentation to apply
indentFirstLine: if False, the 1st line will not be indented
Returns: The result string with all lines indented
"""
indent = _ONE_INDENT * indentLevels
lines = str.splitlines(True)
result = ''
if len(lines) > 0 and not indentFirstLine:
first = 1
result += lines[0]
else:
first = 0
for line in lines[first:]:
result += indent + line
return result |
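
A short usage sketch, assuming the function above is in scope and that the module-level _ONE_INDENT constant (not shown) is two spaces:

_ONE_INDENT = "  "                               # assumed value of the module constant

text = "first line\nsecond line\n"
print(_indentLines(text, indentLevels=2, indentFirstLine=False))
# first line
#     second line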
def register_halfmaxes(self, telescope, band, lower, upper):
"""Register precomputed half-max points."""
if (telescope, band) in self._halfmaxes:
raise AlreadyDefinedError('half-max points for %s/%s already '
'defined', telescope, band)
self._note(telescope, band)
self._halfmaxes[telescope,band] = (lower, upper)
    return self | Register precomputed half-max points. | Below is the instruction that describes the task:
### Input:
Register precomputed half-max points.
### Response:
def register_halfmaxes(self, telescope, band, lower, upper):
"""Register precomputed half-max points."""
if (telescope, band) in self._halfmaxes:
raise AlreadyDefinedError('half-max points for %s/%s already '
'defined', telescope, band)
self._note(telescope, band)
self._halfmaxes[telescope,band] = (lower, upper)
return self |
def cp_files(self, source, target, delete_source=False):
'''Copy files
This function can handle multiple files if source S3 URL has wildcard
characters. It also handles recursive mode by copying all files and
keep the directory structure.
'''
pool = ThreadPool(ThreadUtil, self.opt)
source = self.source_expand(source)
if target[-1] == PATH_SEP:
for src in source:
self.cp_single_file(pool, src, os.path.join(target, self.get_basename(S3URL(src).path)), delete_source)
else:
if len(source) > 1:
raise Failure('Target "%s" is not a directory (with a trailing slash).' % target)
# Copy file if it exists otherwise do nothing
elif len(source) == 1:
self.cp_single_file(pool, source[0], target, delete_source)
else:
# Source expand may return empty list only if ignore-empty-source is set to true
pass
pool.join() | Copy files
This function can handle multiple files if source S3 URL has wildcard
characters. It also handles recursive mode by copying all files and
keep the directory structure. | Below is the instruction that describes the task:
### Input:
Copy files
This function can handle multiple files if source S3 URL has wildcard
characters. It also handles recursive mode by copying all files and
keep the directory structure.
### Response:
def cp_files(self, source, target, delete_source=False):
'''Copy files
This function can handle multiple files if source S3 URL has wildcard
characters. It also handles recursive mode by copying all files and
keep the directory structure.
'''
pool = ThreadPool(ThreadUtil, self.opt)
source = self.source_expand(source)
if target[-1] == PATH_SEP:
for src in source:
self.cp_single_file(pool, src, os.path.join(target, self.get_basename(S3URL(src).path)), delete_source)
else:
if len(source) > 1:
raise Failure('Target "%s" is not a directory (with a trailing slash).' % target)
# Copy file if it exists otherwise do nothing
elif len(source) == 1:
self.cp_single_file(pool, source[0], target, delete_source)
else:
# Source expand may return empty list only if ignore-empty-source is set to true
pass
pool.join() |
def plot_roc_curve(self, on, bootstrap_samples=100, ax=None, **kwargs):
"""Plot an ROC curve for benefit and a given variable
Parameters
----------
on : str or function or list or dict
See `cohort.load.as_dataframe`
bootstrap_samples : int, optional
Number of boostrap samples to use to compute the AUC
ax : Axes, default None
Axes to plot on
Returns
-------
(mean_auc_score, plot): (float, matplotlib plot)
Returns the average AUC for the given predictor over `bootstrap_samples`
and the associated ROC curve
"""
plot_col, df = self.as_dataframe(on, return_cols=True, **kwargs)
df = filter_not_null(df, "benefit")
df = filter_not_null(df, plot_col)
df.benefit = df.benefit.astype(bool)
return roc_curve_plot(df, plot_col, "benefit", bootstrap_samples, ax=ax) | Plot an ROC curve for benefit and a given variable
Parameters
----------
on : str or function or list or dict
See `cohort.load.as_dataframe`
bootstrap_samples : int, optional
Number of boostrap samples to use to compute the AUC
ax : Axes, default None
Axes to plot on
Returns
-------
(mean_auc_score, plot): (float, matplotlib plot)
Returns the average AUC for the given predictor over `bootstrap_samples`
and the associated ROC curve | Below is the instruction that describes the task:
### Input:
Plot an ROC curve for benefit and a given variable
Parameters
----------
on : str or function or list or dict
See `cohort.load.as_dataframe`
bootstrap_samples : int, optional
Number of boostrap samples to use to compute the AUC
ax : Axes, default None
Axes to plot on
Returns
-------
(mean_auc_score, plot): (float, matplotlib plot)
Returns the average AUC for the given predictor over `bootstrap_samples`
and the associated ROC curve
### Response:
def plot_roc_curve(self, on, bootstrap_samples=100, ax=None, **kwargs):
"""Plot an ROC curve for benefit and a given variable
Parameters
----------
on : str or function or list or dict
See `cohort.load.as_dataframe`
bootstrap_samples : int, optional
Number of boostrap samples to use to compute the AUC
ax : Axes, default None
Axes to plot on
Returns
-------
(mean_auc_score, plot): (float, matplotlib plot)
Returns the average AUC for the given predictor over `bootstrap_samples`
and the associated ROC curve
"""
plot_col, df = self.as_dataframe(on, return_cols=True, **kwargs)
df = filter_not_null(df, "benefit")
df = filter_not_null(df, plot_col)
df.benefit = df.benefit.astype(bool)
return roc_curve_plot(df, plot_col, "benefit", bootstrap_samples, ax=ax) |
def _get_headers(self):
"""Get all the headers we're going to need:
1. Authorization
2. Content-Type
3. User-agent
Note that the User-agent string contains the library name, the
libary version, and the python version. This will help us track
what people are using, and where we should concentrate our
development efforts."""
user_agent = __api_lib_name__ + '/' + __version__ + '/' + \
PYTHON_VERSION
headers = {'User-Agent': user_agent,
'Content-Type': 'application/x-www-form-urlencoded'}
if self.key:
headers['Authorization'] = 'Bearer ' + self.key
return headers | Get all the headers we're going to need:
1. Authorization
2. Content-Type
3. User-agent
Note that the User-agent string contains the library name, the
libary version, and the python version. This will help us track
what people are using, and where we should concentrate our
development efforts. | Below is the instruction that describes the task:
### Input:
Get all the headers we're going to need:
1. Authorization
2. Content-Type
3. User-agent
Note that the User-agent string contains the library name, the
libary version, and the python version. This will help us track
what people are using, and where we should concentrate our
development efforts.
### Response:
def _get_headers(self):
"""Get all the headers we're going to need:
1. Authorization
2. Content-Type
3. User-agent
Note that the User-agent string contains the library name, the
libary version, and the python version. This will help us track
what people are using, and where we should concentrate our
development efforts."""
user_agent = __api_lib_name__ + '/' + __version__ + '/' + \
PYTHON_VERSION
headers = {'User-Agent': user_agent,
'Content-Type': 'application/x-www-form-urlencoded'}
if self.key:
headers['Authorization'] = 'Bearer ' + self.key
return headers |
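
Assembled in isolation with placeholder values for the module-level constants (which are not shown above), the resulting header dictionary looks like this:

__api_lib_name__ = "examplelib"                  # placeholder constants, not the real values
__version__ = "1.2.3"
PYTHON_VERSION = "3.11"
key = "abc123"

user_agent = __api_lib_name__ + '/' + __version__ + '/' + PYTHON_VERSION
headers = {'User-Agent': user_agent,
           'Content-Type': 'application/x-www-form-urlencoded'}
if key:
    headers['Authorization'] = 'Bearer ' + key
print(headers)
# {'User-Agent': 'examplelib/1.2.3/3.11',
#  'Content-Type': 'application/x-www-form-urlencoded',
#  'Authorization': 'Bearer abc123'}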
def parsestr(self, argsstr, usedname, location):
"""Parse a string lexically and store the result.
ARGS:
argsstr <str>:
The string to parse.
usedname <str>:
The string used by the user to invoke the option.
location <str>:
A user friendly sring describing where the parser got this
data from.
"""
try:
value = self.format.parsestr(argsstr)
except formats.BadNumberOfArguments, e:
raise BadNumberOfArguments(usedname, e.required, e.supplied)
except formats.BadArgument, e:
raise BadArgument(usedname, e.argument, e.message)
if self.recurring:
self.value.append(value)
else:
self.value = value
self.location = location | Parse a string lexically and store the result.
ARGS:
argsstr <str>:
The string to parse.
usedname <str>:
The string used by the user to invoke the option.
location <str>:
A user friendly sring describing where the parser got this
data from. | Below is the instruction that describes the task:
### Input:
Parse a string lexically and store the result.
ARGS:
argsstr <str>:
The string to parse.
usedname <str>:
The string used by the user to invoke the option.
location <str>:
A user friendly sring describing where the parser got this
data from.
### Response:
def parsestr(self, argsstr, usedname, location):
"""Parse a string lexically and store the result.
ARGS:
argsstr <str>:
The string to parse.
usedname <str>:
The string used by the user to invoke the option.
location <str>:
A user friendly sring describing where the parser got this
data from.
"""
try:
value = self.format.parsestr(argsstr)
except formats.BadNumberOfArguments, e:
raise BadNumberOfArguments(usedname, e.required, e.supplied)
except formats.BadArgument, e:
raise BadArgument(usedname, e.argument, e.message)
if self.recurring:
self.value.append(value)
else:
self.value = value
self.location = location |
def gzip_if_smaller(content_related, data):
"""Calls bytes(request), and based on a certain threshold,
optionally gzips the resulting data. If the gzipped data is
smaller than the original byte array, this is returned instead.
Note that this only applies to content related requests.
"""
if content_related and len(data) > 512:
gzipped = bytes(GzipPacked(data))
return gzipped if len(gzipped) < len(data) else data
else:
return data | Calls bytes(request), and based on a certain threshold,
optionally gzips the resulting data. If the gzipped data is
smaller than the original byte array, this is returned instead.
Note that this only applies to content related requests. | Below is the instruction that describes the task:
### Input:
Calls bytes(request), and based on a certain threshold,
optionally gzips the resulting data. If the gzipped data is
smaller than the original byte array, this is returned instead.
Note that this only applies to content related requests.
### Response:
def gzip_if_smaller(content_related, data):
"""Calls bytes(request), and based on a certain threshold,
optionally gzips the resulting data. If the gzipped data is
smaller than the original byte array, this is returned instead.
Note that this only applies to content related requests.
"""
if content_related and len(data) > 512:
gzipped = bytes(GzipPacked(data))
return gzipped if len(gzipped) < len(data) else data
else:
return data |
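
GzipPacked is a Telegram-specific wrapper that is not shown above; the same compress-only-if-it-helps idea can be sketched with the standard gzip module:

import gzip

def maybe_gzip(data: bytes, threshold: int = 512) -> bytes:
    # Skip small payloads, and keep the original bytes if compression does not shrink them.
    if len(data) <= threshold:
        return data
    packed = gzip.compress(data)
    return packed if len(packed) < len(data) else data

payload = b"x" * 2048
print(len(maybe_gzip(payload)) < len(payload))   # True for highly repetitive data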
def resolveport(self, definitions):
"""
Resolve port_type reference.
@param definitions: A definitions object.
@type definitions: L{Definitions}
"""
ref = qualify(self.type, self.root, definitions.tns)
port_type = definitions.port_types.get(ref)
if port_type is None:
raise Exception("portType '%s', not-found" % self.type)
else:
self.type = port_type | Resolve port_type reference.
@param definitions: A definitions object.
@type definitions: L{Definitions} | Below is the instruction that describes the task:
### Input:
Resolve port_type reference.
@param definitions: A definitions object.
@type definitions: L{Definitions}
### Response:
def resolveport(self, definitions):
"""
Resolve port_type reference.
@param definitions: A definitions object.
@type definitions: L{Definitions}
"""
ref = qualify(self.type, self.root, definitions.tns)
port_type = definitions.port_types.get(ref)
if port_type is None:
raise Exception("portType '%s', not-found" % self.type)
else:
self.type = port_type |
def _extrac_qtl(peak, block, headers):
""" Given a row containing the peak of the QTL and all the rows of
the linkage group of the said QTL (splitted per trait), determine
the QTL interval and find the start and stop marker of the said
interval.
The interval is a LOD 2 interval.
The approach is conservative in the way it takes the first and last
marker within the interval.
:arg peak, a list containing the row information for the peak marker
:arg block, a hash containing per column, all the rows in the
linkage group of this QTL, splitted per trait.
:arg headers, the first row of the QTL matrix file, used to determine
which block to look at for each trait process.
"""
qtls = []
if not peak:
return qtls
threshold = 2
for trait in peak:
blockcnt = headers.index(trait)
local_block = block[blockcnt]
lod2_threshold = float(peak[trait][-1]) - float(threshold)
# Search QTL start
cnt = local_block.index(peak[trait])
start = local_block[cnt]
while cnt >= 0:
start = local_block[cnt]
if re.match(r'c\d+\.loc[\d\.]+', local_block[cnt][0]):
cnt = cnt - 1
continue
if float(local_block[cnt][-1]) < lod2_threshold:
break
cnt = cnt - 1
# Search QTL end
end = []
cnt = local_block.index(peak[trait])
end = local_block[cnt]
while cnt < len(local_block):
end = local_block[cnt]
if re.match(r'c\d+\.loc[\d\.]+', local_block[cnt][0]):
cnt += 1
continue
if float(local_block[cnt][-1]) < lod2_threshold:
break
cnt = cnt + 1
qtl = QTL()
qtl.trait = trait
qtl.start_mk = start[0]
qtl.start_position = start[2]
qtl.peak_mk = peak[trait][0]
qtl.peak_start_position = peak[trait][2]
qtl.peak_stop_position = peak[trait][2]
qtl.stop_mk = end[0]
qtl.stop_position = end[2]
qtls.append(qtl)
return qtls | Given a row containing the peak of the QTL and all the rows of
the linkage group of the said QTL (splitted per trait), determine
the QTL interval and find the start and stop marker of the said
interval.
The interval is a LOD 2 interval.
The approach is conservative in the way it takes the first and last
marker within the interval.
:arg peak, a list containing the row information for the peak marker
:arg block, a hash containing per column, all the rows in the
linkage group of this QTL, splitted per trait.
:arg headers, the first row of the QTL matrix file, used to determine
which block to look at for each trait process. | Below is the instruction that describes the task:
### Input:
Given a row containing the peak of the QTL and all the rows of
the linkage group of the said QTL (splitted per trait), determine
the QTL interval and find the start and stop marker of the said
interval.
The interval is a LOD 2 interval.
The approach is conservative in the way it takes the first and last
marker within the interval.
:arg peak, a list containing the row information for the peak marker
:arg block, a hash containing per column, all the rows in the
linkage group of this QTL, splitted per trait.
:arg headers, the first row of the QTL matrix file, used to determine
which block to look at for each trait process.
### Response:
def _extrac_qtl(peak, block, headers):
""" Given a row containing the peak of the QTL and all the rows of
the linkage group of the said QTL (splitted per trait), determine
the QTL interval and find the start and stop marker of the said
interval.
The interval is a LOD 2 interval.
The approach is conservative in the way it takes the first and last
marker within the interval.
:arg peak, a list containing the row information for the peak marker
:arg block, a hash containing per column, all the rows in the
linkage group of this QTL, splitted per trait.
:arg headers, the first row of the QTL matrix file, used to determine
which block to look at for each trait process.
"""
qtls = []
if not peak:
return qtls
threshold = 2
for trait in peak:
blockcnt = headers.index(trait)
local_block = block[blockcnt]
lod2_threshold = float(peak[trait][-1]) - float(threshold)
# Search QTL start
cnt = local_block.index(peak[trait])
start = local_block[cnt]
while cnt >= 0:
start = local_block[cnt]
if re.match(r'c\d+\.loc[\d\.]+', local_block[cnt][0]):
cnt = cnt - 1
continue
if float(local_block[cnt][-1]) < lod2_threshold:
break
cnt = cnt - 1
# Search QTL end
end = []
cnt = local_block.index(peak[trait])
end = local_block[cnt]
while cnt < len(local_block):
end = local_block[cnt]
if re.match(r'c\d+\.loc[\d\.]+', local_block[cnt][0]):
cnt += 1
continue
if float(local_block[cnt][-1]) < lod2_threshold:
break
cnt = cnt + 1
qtl = QTL()
qtl.trait = trait
qtl.start_mk = start[0]
qtl.start_position = start[2]
qtl.peak_mk = peak[trait][0]
qtl.peak_start_position = peak[trait][2]
qtl.peak_stop_position = peak[trait][2]
qtl.stop_mk = end[0]
qtl.stop_position = end[2]
qtls.append(qtl)
return qtls |
def get_min_max_mag(self):
"""
:returns: minumum and maximum magnitudes from the underlying MFDs
"""
m1s, m2s = [], []
for mfd in self:
m1, m2 = mfd.get_min_max_mag()
m1s.append(m1)
m2s.append(m2)
    return min(m1s), max(m2s) | :returns: minumum and maximum magnitudes from the underlying MFDs | Below is the instruction that describes the task:
### Input:
:returns: minumum and maximum magnitudes from the underlying MFDs
### Response:
def get_min_max_mag(self):
"""
:returns: minumum and maximum magnitudes from the underlying MFDs
"""
m1s, m2s = [], []
for mfd in self:
m1, m2 = mfd.get_min_max_mag()
m1s.append(m1)
m2s.append(m2)
return min(m1s), max(m2s) |
def show(self):
"""write my output to sys.stdout/err as appropriate"""
sys.stdout.write(self.stdout)
sys.stderr.write(self.stderr)
sys.stdout.flush()
    sys.stderr.flush() | write my output to sys.stdout/err as appropriate | Below is the instruction that describes the task:
### Input:
write my output to sys.stdout/err as appropriate
### Response:
def show(self):
"""write my output to sys.stdout/err as appropriate"""
sys.stdout.write(self.stdout)
sys.stderr.write(self.stderr)
sys.stdout.flush()
sys.stderr.flush() |
def upload_complete(self, path, url, quiet):
""" function to complete an upload to retrieve a path from a url
Parameters
==========
path: the path for the upload that is read in
url: the url to send the POST to
quiet: suppress verbose output (default is False)
"""
file_size = os.path.getsize(path)
try:
with tqdm(
total=file_size,
unit='B',
unit_scale=True,
unit_divisor=1024,
disable=quiet) as progress_bar:
with io.open(path, 'rb', buffering=0) as fp:
reader = TqdmBufferedReader(fp, progress_bar)
session = requests.Session()
retries = Retry(total=10, backoff_factor=0.5)
adapter = HTTPAdapter(max_retries=retries)
session.mount('http://', adapter)
session.mount('https://', adapter)
response = session.put(url, data=reader)
except Exception as error:
print(error)
return False
return response.status_code == 200 or response.status_code == 201 | function to complete an upload to retrieve a path from a url
Parameters
==========
path: the path for the upload that is read in
url: the url to send the POST to
quiet: suppress verbose output (default is False) | Below is the instruction that describes the task:
### Input:
function to complete an upload to retrieve a path from a url
Parameters
==========
path: the path for the upload that is read in
url: the url to send the POST to
quiet: suppress verbose output (default is False)
### Response:
def upload_complete(self, path, url, quiet):
""" function to complete an upload to retrieve a path from a url
Parameters
==========
path: the path for the upload that is read in
url: the url to send the POST to
quiet: suppress verbose output (default is False)
"""
file_size = os.path.getsize(path)
try:
with tqdm(
total=file_size,
unit='B',
unit_scale=True,
unit_divisor=1024,
disable=quiet) as progress_bar:
with io.open(path, 'rb', buffering=0) as fp:
reader = TqdmBufferedReader(fp, progress_bar)
session = requests.Session()
retries = Retry(total=10, backoff_factor=0.5)
adapter = HTTPAdapter(max_retries=retries)
session.mount('http://', adapter)
session.mount('https://', adapter)
response = session.put(url, data=reader)
except Exception as error:
print(error)
return False
return response.status_code == 200 or response.status_code == 201 |
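
The retry setup inside the upload is a standard requests/urllib3 pattern; a stripped-down sketch with a placeholder URL and local file:

import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

session = requests.Session()
retries = Retry(total=10, backoff_factor=0.5)    # exponential backoff between attempts
session.mount('http://', HTTPAdapter(max_retries=retries))
session.mount('https://', HTTPAdapter(max_retries=retries))

with open('payload.bin', 'rb') as fp:            # assumed local file
    response = session.put('https://example.com/upload', data=fp)   # placeholder URL
print(response.status_code in (200, 201))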
def diffusion_mds(means, weights, d, diffusion_rounds=10):
"""
Dimensionality reduction using MDS, while running diffusion on W.
Args:
means (array): genes x clusters
weights (array): clusters x cells
d (int): desired dimensionality
Returns:
W_reduced (array): array of shape (d, cells)
"""
for i in range(diffusion_rounds):
weights = weights*weights
weights = weights/weights.sum(0)
X = dim_reduce(means, weights, d)
if X.shape[0]==2:
return X.dot(weights)
else:
return X.T.dot(weights) | Dimensionality reduction using MDS, while running diffusion on W.
Args:
means (array): genes x clusters
weights (array): clusters x cells
d (int): desired dimensionality
Returns:
W_reduced (array): array of shape (d, cells) | Below is the instruction that describes the task:
### Input:
Dimensionality reduction using MDS, while running diffusion on W.
Args:
means (array): genes x clusters
weights (array): clusters x cells
d (int): desired dimensionality
Returns:
W_reduced (array): array of shape (d, cells)
### Response:
def diffusion_mds(means, weights, d, diffusion_rounds=10):
"""
Dimensionality reduction using MDS, while running diffusion on W.
Args:
means (array): genes x clusters
weights (array): clusters x cells
d (int): desired dimensionality
Returns:
W_reduced (array): array of shape (d, cells)
"""
for i in range(diffusion_rounds):
weights = weights*weights
weights = weights/weights.sum(0)
X = dim_reduce(means, weights, d)
if X.shape[0]==2:
return X.dot(weights)
else:
return X.T.dot(weights) |
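
dim_reduce is not shown above, but the diffusion part on its own is just repeated element-wise squaring followed by column renormalisation, which sharpens each cell's weights toward its dominant cluster. A toy numpy sketch:

import numpy as np

weights = np.array([[0.6, 0.2, 0.5],             # toy clusters x cells matrix, columns sum to 1
                    [0.4, 0.8, 0.5]])

for _ in range(10):                              # the diffusion rounds
    weights = weights * weights
    weights = weights / weights.sum(0)

print(weights.round(3))   # untied columns collapse to their dominant cluster; the tied column stays 0.5/0.5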
def unregister(self, cleanup_mode):
"""Unregisters a machine previously registered with
:py:func:`IVirtualBox.register_machine` and optionally do additional
cleanup before the machine is unregistered.
This method does not delete any files. It only changes the machine configuration and
the list of registered machines in the VirtualBox object. To delete the files which
belonged to the machine, including the XML file of the machine itself, call
:py:func:`delete_config` , optionally with the array of IMedium objects which was returned
from this method.
How thoroughly this method cleans up the machine configuration before unregistering
the machine depends on the @a cleanupMode argument.
With "UnregisterOnly", the machine will only be unregistered, but no additional
cleanup will be performed. The call will fail if the machine is in "Saved" state
or has any snapshots or any media attached (see :py:class:`IMediumAttachment` ).
It is the responsibility of the caller to delete all such configuration in this mode.
In this mode, the API behaves like the former @c IVirtualBox::unregisterMachine() API
which it replaces.
With "DetachAllReturnNone", the call will succeed even if the machine is in "Saved"
state or if it has snapshots or media attached. All media attached to the current machine
state or in snapshots will be detached. No medium objects will be returned;
all of the machine's media will remain open.
With "DetachAllReturnHardDisksOnly", the call will behave like with "DetachAllReturnNone",
except that all the hard disk medium objects which were detached from the machine will
be returned as an array. This allows for quickly passing them to the :py:func:`delete_config`
API for closing and deletion.
With "Full", the call will behave like with "DetachAllReturnHardDisksOnly", except
that all media will be returned in the array, including removable media like DVDs and
floppies. This might be useful if the user wants to inspect in detail which media were
attached to the machine. Be careful when passing the media array to :py:func:`delete_config`
in that case because users will typically want to preserve ISO and RAW image files.
A typical implementation will use "DetachAllReturnHardDisksOnly" and then pass the
resulting IMedium array to :py:func:`delete_config` . This way, the machine is completely
deleted with all its saved states and hard disk images, but images for removable
drives (such as ISO and RAW files) will remain on disk.
This API does not verify whether the media files returned in the array are still
attached to other machines (i.e. shared between several machines). If such a shared
image is passed to :py:func:`delete_config` however, closing the image will fail there
and the image will be silently skipped.
This API may, however, move media from this machine's media registry to other media
registries (see :py:class:`IMedium` for details on media registries). For machines
created with VirtualBox 4.0 or later, if media from this machine's media registry
are also attached to another machine (shared attachments), each such medium will be
moved to another machine's registry. This is because without this machine's media
registry, the other machine cannot find its media any more and would become inaccessible.
This API implicitly calls :py:func:`save_settings` to save all current machine settings
before unregistering it. It may also silently call :py:func:`save_settings` on other machines
if media are moved to other machines' media registries.
After successful method invocation, the :py:class:`IMachineRegisteredEvent` event
is fired.
The call will fail if the machine is currently locked (see :py:class:`ISession` ).
If the given machine is inaccessible (see :py:func:`accessible` ), it
will be unregistered and fully uninitialized right afterwards. As a result,
the returned machine object will be unusable and an attempt to call
**any** method will return the "Object not ready" error.
in cleanup_mode of type :class:`CleanupMode`
How to clean up after the machine has been unregistered.
return media of type :class:`IMedium`
List of media detached from the machine, depending on the @a cleanupMode parameter.
raises :class:`VBoxErrorInvalidObjectState`
Machine is currently locked for a session.
"""
if not isinstance(cleanup_mode, CleanupMode):
raise TypeError("cleanup_mode can only be an instance of type CleanupMode")
media = self._call("unregister",
in_p=[cleanup_mode])
media = [IMedium(a) for a in media]
return media | Unregisters a machine previously registered with
:py:func:`IVirtualBox.register_machine` and optionally do additional
cleanup before the machine is unregistered.
This method does not delete any files. It only changes the machine configuration and
the list of registered machines in the VirtualBox object. To delete the files which
belonged to the machine, including the XML file of the machine itself, call
:py:func:`delete_config` , optionally with the array of IMedium objects which was returned
from this method.
How thoroughly this method cleans up the machine configuration before unregistering
the machine depends on the @a cleanupMode argument.
With "UnregisterOnly", the machine will only be unregistered, but no additional
cleanup will be performed. The call will fail if the machine is in "Saved" state
or has any snapshots or any media attached (see :py:class:`IMediumAttachment` ).
It is the responsibility of the caller to delete all such configuration in this mode.
In this mode, the API behaves like the former @c IVirtualBox::unregisterMachine() API
which it replaces.
With "DetachAllReturnNone", the call will succeed even if the machine is in "Saved"
state or if it has snapshots or media attached. All media attached to the current machine
state or in snapshots will be detached. No medium objects will be returned;
all of the machine's media will remain open.
With "DetachAllReturnHardDisksOnly", the call will behave like with "DetachAllReturnNone",
except that all the hard disk medium objects which were detached from the machine will
be returned as an array. This allows for quickly passing them to the :py:func:`delete_config`
API for closing and deletion.
With "Full", the call will behave like with "DetachAllReturnHardDisksOnly", except
that all media will be returned in the array, including removable media like DVDs and
floppies. This might be useful if the user wants to inspect in detail which media were
attached to the machine. Be careful when passing the media array to :py:func:`delete_config`
in that case because users will typically want to preserve ISO and RAW image files.
A typical implementation will use "DetachAllReturnHardDisksOnly" and then pass the
resulting IMedium array to :py:func:`delete_config` . This way, the machine is completely
deleted with all its saved states and hard disk images, but images for removable
drives (such as ISO and RAW files) will remain on disk.
This API does not verify whether the media files returned in the array are still
attached to other machines (i.e. shared between several machines). If such a shared
image is passed to :py:func:`delete_config` however, closing the image will fail there
and the image will be silently skipped.
This API may, however, move media from this machine's media registry to other media
registries (see :py:class:`IMedium` for details on media registries). For machines
created with VirtualBox 4.0 or later, if media from this machine's media registry
are also attached to another machine (shared attachments), each such medium will be
moved to another machine's registry. This is because without this machine's media
registry, the other machine cannot find its media any more and would become inaccessible.
This API implicitly calls :py:func:`save_settings` to save all current machine settings
before unregistering it. It may also silently call :py:func:`save_settings` on other machines
if media are moved to other machines' media registries.
After successful method invocation, the :py:class:`IMachineRegisteredEvent` event
is fired.
The call will fail if the machine is currently locked (see :py:class:`ISession` ).
If the given machine is inaccessible (see :py:func:`accessible` ), it
will be unregistered and fully uninitialized right afterwards. As a result,
the returned machine object will be unusable and an attempt to call
**any** method will return the "Object not ready" error.
in cleanup_mode of type :class:`CleanupMode`
How to clean up after the machine has been unregistered.
return media of type :class:`IMedium`
List of media detached from the machine, depending on the @a cleanupMode parameter.
raises :class:`VBoxErrorInvalidObjectState`
Machine is currently locked for a session. | Below is the instruction that describes the task:
### Input:
Unregisters a machine previously registered with
:py:func:`IVirtualBox.register_machine` and optionally do additional
cleanup before the machine is unregistered.
This method does not delete any files. It only changes the machine configuration and
the list of registered machines in the VirtualBox object. To delete the files which
belonged to the machine, including the XML file of the machine itself, call
:py:func:`delete_config` , optionally with the array of IMedium objects which was returned
from this method.
How thoroughly this method cleans up the machine configuration before unregistering
the machine depends on the @a cleanupMode argument.
With "UnregisterOnly", the machine will only be unregistered, but no additional
cleanup will be performed. The call will fail if the machine is in "Saved" state
or has any snapshots or any media attached (see :py:class:`IMediumAttachment` ).
It is the responsibility of the caller to delete all such configuration in this mode.
In this mode, the API behaves like the former @c IVirtualBox::unregisterMachine() API
which it replaces.
With "DetachAllReturnNone", the call will succeed even if the machine is in "Saved"
state or if it has snapshots or media attached. All media attached to the current machine
state or in snapshots will be detached. No medium objects will be returned;
all of the machine's media will remain open.
With "DetachAllReturnHardDisksOnly", the call will behave like with "DetachAllReturnNone",
except that all the hard disk medium objects which were detached from the machine will
be returned as an array. This allows for quickly passing them to the :py:func:`delete_config`
API for closing and deletion.
With "Full", the call will behave like with "DetachAllReturnHardDisksOnly", except
that all media will be returned in the array, including removable media like DVDs and
floppies. This might be useful if the user wants to inspect in detail which media were
attached to the machine. Be careful when passing the media array to :py:func:`delete_config`
in that case because users will typically want to preserve ISO and RAW image files.
A typical implementation will use "DetachAllReturnHardDisksOnly" and then pass the
resulting IMedium array to :py:func:`delete_config` . This way, the machine is completely
deleted with all its saved states and hard disk images, but images for removable
drives (such as ISO and RAW files) will remain on disk.
This API does not verify whether the media files returned in the array are still
attached to other machines (i.e. shared between several machines). If such a shared
image is passed to :py:func:`delete_config` however, closing the image will fail there
and the image will be silently skipped.
This API may, however, move media from this machine's media registry to other media
registries (see :py:class:`IMedium` for details on media registries). For machines
created with VirtualBox 4.0 or later, if media from this machine's media registry
are also attached to another machine (shared attachments), each such medium will be
moved to another machine's registry. This is because without this machine's media
registry, the other machine cannot find its media any more and would become inaccessible.
This API implicitly calls :py:func:`save_settings` to save all current machine settings
before unregistering it. It may also silently call :py:func:`save_settings` on other machines
if media are moved to other machines' media registries.
After successful method invocation, the :py:class:`IMachineRegisteredEvent` event
is fired.
The call will fail if the machine is currently locked (see :py:class:`ISession` ).
If the given machine is inaccessible (see :py:func:`accessible` ), it
will be unregistered and fully uninitialized right afterwards. As a result,
the returned machine object will be unusable and an attempt to call
**any** method will return the "Object not ready" error.
in cleanup_mode of type :class:`CleanupMode`
How to clean up after the machine has been unregistered.
return media of type :class:`IMedium`
List of media detached from the machine, depending on the @a cleanupMode parameter.
raises :class:`VBoxErrorInvalidObjectState`
Machine is currently locked for a session.
### Response:
def unregister(self, cleanup_mode):
"""Unregisters a machine previously registered with
:py:func:`IVirtualBox.register_machine` and optionally do additional
cleanup before the machine is unregistered.
This method does not delete any files. It only changes the machine configuration and
the list of registered machines in the VirtualBox object. To delete the files which
belonged to the machine, including the XML file of the machine itself, call
:py:func:`delete_config` , optionally with the array of IMedium objects which was returned
from this method.
How thoroughly this method cleans up the machine configuration before unregistering
the machine depends on the @a cleanupMode argument.
With "UnregisterOnly", the machine will only be unregistered, but no additional
cleanup will be performed. The call will fail if the machine is in "Saved" state
or has any snapshots or any media attached (see :py:class:`IMediumAttachment` ).
It is the responsibility of the caller to delete all such configuration in this mode.
In this mode, the API behaves like the former @c IVirtualBox::unregisterMachine() API
which it replaces.
With "DetachAllReturnNone", the call will succeed even if the machine is in "Saved"
state or if it has snapshots or media attached. All media attached to the current machine
state or in snapshots will be detached. No medium objects will be returned;
all of the machine's media will remain open.
With "DetachAllReturnHardDisksOnly", the call will behave like with "DetachAllReturnNone",
except that all the hard disk medium objects which were detached from the machine will
be returned as an array. This allows for quickly passing them to the :py:func:`delete_config`
API for closing and deletion.
With "Full", the call will behave like with "DetachAllReturnHardDisksOnly", except
that all media will be returned in the array, including removable media like DVDs and
floppies. This might be useful if the user wants to inspect in detail which media were
attached to the machine. Be careful when passing the media array to :py:func:`delete_config`
in that case because users will typically want to preserve ISO and RAW image files.
A typical implementation will use "DetachAllReturnHardDisksOnly" and then pass the
resulting IMedium array to :py:func:`delete_config` . This way, the machine is completely
deleted with all its saved states and hard disk images, but images for removable
drives (such as ISO and RAW files) will remain on disk.
This API does not verify whether the media files returned in the array are still
attached to other machines (i.e. shared between several machines). If such a shared
image is passed to :py:func:`delete_config` however, closing the image will fail there
and the image will be silently skipped.
This API may, however, move media from this machine's media registry to other media
registries (see :py:class:`IMedium` for details on media registries). For machines
created with VirtualBox 4.0 or later, if media from this machine's media registry
are also attached to another machine (shared attachments), each such medium will be
moved to another machine's registry. This is because without this machine's media
registry, the other machine cannot find its media any more and would become inaccessible.
This API implicitly calls :py:func:`save_settings` to save all current machine settings
before unregistering it. It may also silently call :py:func:`save_settings` on other machines
if media are moved to other machines' media registries.
After successful method invocation, the :py:class:`IMachineRegisteredEvent` event
is fired.
The call will fail if the machine is currently locked (see :py:class:`ISession` ).
If the given machine is inaccessible (see :py:func:`accessible` ), it
will be unregistered and fully uninitialized right afterwards. As a result,
the returned machine object will be unusable and an attempt to call
**any** method will return the "Object not ready" error.
in cleanup_mode of type :class:`CleanupMode`
How to clean up after the machine has been unregistered.
return media of type :class:`IMedium`
List of media detached from the machine, depending on the @a cleanupMode parameter.
raises :class:`VBoxErrorInvalidObjectState`
Machine is currently locked for a session.
"""
if not isinstance(cleanup_mode, CleanupMode):
raise TypeError("cleanup_mode can only be an instance of type CleanupMode")
media = self._call("unregister",
in_p=[cleanup_mode])
media = [IMedium(a) for a in media]
return media |
def removed_issues(self, board_id, sprint_id):
"""Return the completed issues for the sprint."""
r_json = self._get_json('rapid/charts/sprintreport?rapidViewId=%s&sprintId=%s' % (board_id, sprint_id),
base=self.AGILE_BASE_URL)
issues = [Issue(self._options, self._session, raw_issues_json) for raw_issues_json in
r_json['contents']['puntedIssues']]
    return issues | Return the completed issues for the sprint. | Below is the instruction that describes the task:
### Input:
Return the completed issues for the sprint.
### Response:
def removed_issues(self, board_id, sprint_id):
"""Return the completed issues for the sprint."""
r_json = self._get_json('rapid/charts/sprintreport?rapidViewId=%s&sprintId=%s' % (board_id, sprint_id),
base=self.AGILE_BASE_URL)
issues = [Issue(self._options, self._session, raw_issues_json) for raw_issues_json in
r_json['contents']['puntedIssues']]
return issues |
def get_column(self, column_name, column_type, index, verbose=True):
"""Summary
Args:
column_name (TYPE): Description
column_type (TYPE): Description
index (TYPE): Description
Returns:
TYPE: Description
"""
return LazyOpResult(
grizzly_impl.get_column(
self.expr,
self.weld_type,
index
),
column_type,
1
) | Summary
Args:
column_name (TYPE): Description
column_type (TYPE): Description
index (TYPE): Description
Returns:
TYPE: Description | Below is the instruction that describes the task:
### Input:
Summary
Args:
column_name (TYPE): Description
column_type (TYPE): Description
index (TYPE): Description
Returns:
TYPE: Description
### Response:
def get_column(self, column_name, column_type, index, verbose=True):
"""Summary
Args:
column_name (TYPE): Description
column_type (TYPE): Description
index (TYPE): Description
Returns:
TYPE: Description
"""
return LazyOpResult(
grizzly_impl.get_column(
self.expr,
self.weld_type,
index
),
column_type,
1
) |
def calcCodePageRanges(unicodes):
""" Given a set of Unicode codepoints (integers), calculate the
corresponding OS/2 CodePage range bits.
This is a direct translation of FontForge implementation:
https://github.com/fontforge/fontforge/blob/7b2c074/fontforge/tottf.c#L3158
"""
codepageRanges = set()
chars = [unichr(u) for u in unicodes]
hasAscii = set(range(0x20, 0x7E)).issubset(unicodes)
hasLineart = "┤" in chars
for char in chars:
if char == "Þ" and hasAscii:
codepageRanges.add(0) # Latin 1
elif char == "Ľ" and hasAscii:
codepageRanges.add(1) # Latin 2: Eastern Europe
if hasLineart:
codepageRanges.add(58) # Latin 2
elif char == "Б":
codepageRanges.add(2) # Cyrillic
if "Ѕ" in chars and hasLineart:
codepageRanges.add(57) # IBM Cyrillic
if "╜" in chars and hasLineart:
codepageRanges.add(49) # MS-DOS Russian
elif char == "Ά":
codepageRanges.add(3) # Greek
if hasLineart and "½" in chars:
codepageRanges.add(48) # IBM Greek
if hasLineart and "√" in chars:
codepageRanges.add(60) # Greek, former 437 G
elif char == "İ" and hasAscii:
codepageRanges.add(4) # Turkish
if hasLineart:
codepageRanges.add(56) # IBM turkish
elif char == "א":
codepageRanges.add(5) # Hebrew
if hasLineart and "√" in chars:
codepageRanges.add(53) # Hebrew
elif char == "ر":
codepageRanges.add(6) # Arabic
if "√" in chars:
codepageRanges.add(51) # Arabic
if hasLineart:
codepageRanges.add(61) # Arabic; ASMO 708
elif char == "ŗ" and hasAscii:
codepageRanges.add(7) # Windows Baltic
if hasLineart:
codepageRanges.add(59) # MS-DOS Baltic
elif char == "₫" and hasAscii:
codepageRanges.add(8) # Vietnamese
elif char == "ๅ":
codepageRanges.add(16) # Thai
elif char == "エ":
codepageRanges.add(17) # JIS/Japan
elif char == "ㄅ":
codepageRanges.add(18) # Chinese: Simplified chars
elif char == "ㄱ":
codepageRanges.add(19) # Korean wansung
elif char == "央":
codepageRanges.add(20) # Chinese: Traditional chars
elif char == "곴":
codepageRanges.add(21) # Korean Johab
elif char == "♥" and hasAscii:
codepageRanges.add(30) # OEM Character Set
# TODO: Symbol bit has a special meaning (check the spec), we need
# to confirm if this is wanted by default.
# elif unichr(0xF000) <= char <= unichr(0xF0FF):
# codepageRanges.add(31) # Symbol Character Set
elif char == "þ" and hasAscii and hasLineart:
codepageRanges.add(54) # MS-DOS Icelandic
elif char == "╚" and hasAscii:
codepageRanges.add(62) # WE/Latin 1
codepageRanges.add(63) # US
elif hasAscii and hasLineart and "√" in chars:
if char == "Å":
codepageRanges.add(50) # MS-DOS Nordic
elif char == "é":
codepageRanges.add(52) # MS-DOS Canadian French
elif char == "õ":
codepageRanges.add(55) # MS-DOS Portuguese
if hasAscii and "‰" in chars and "∑" in chars:
codepageRanges.add(29) # Macintosh Character Set (US Roman)
# when no codepage ranges can be enabled, fall back to enabling bit 0
# (Latin 1) so that the font works in MS Word:
# https://github.com/googlei18n/fontmake/issues/468
if not codepageRanges:
codepageRanges.add(0)
return codepageRanges | Given a set of Unicode codepoints (integers), calculate the
corresponding OS/2 CodePage range bits.
This is a direct translation of FontForge implementation:
https://github.com/fontforge/fontforge/blob/7b2c074/fontforge/tottf.c#L3158 | Below is the instruction that describes the task:
### Input:
Given a set of Unicode codepoints (integers), calculate the
corresponding OS/2 CodePage range bits.
This is a direct translation of FontForge implementation:
https://github.com/fontforge/fontforge/blob/7b2c074/fontforge/tottf.c#L3158
### Response:
def calcCodePageRanges(unicodes):
""" Given a set of Unicode codepoints (integers), calculate the
corresponding OS/2 CodePage range bits.
This is a direct translation of FontForge implementation:
https://github.com/fontforge/fontforge/blob/7b2c074/fontforge/tottf.c#L3158
"""
codepageRanges = set()
chars = [unichr(u) for u in unicodes]
hasAscii = set(range(0x20, 0x7E)).issubset(unicodes)
hasLineart = "┤" in chars
for char in chars:
if char == "Þ" and hasAscii:
codepageRanges.add(0) # Latin 1
elif char == "Ľ" and hasAscii:
codepageRanges.add(1) # Latin 2: Eastern Europe
if hasLineart:
codepageRanges.add(58) # Latin 2
elif char == "Б":
codepageRanges.add(2) # Cyrillic
if "Ѕ" in chars and hasLineart:
codepageRanges.add(57) # IBM Cyrillic
if "╜" in chars and hasLineart:
codepageRanges.add(49) # MS-DOS Russian
elif char == "Ά":
codepageRanges.add(3) # Greek
if hasLineart and "½" in chars:
codepageRanges.add(48) # IBM Greek
if hasLineart and "√" in chars:
codepageRanges.add(60) # Greek, former 437 G
elif char == "İ" and hasAscii:
codepageRanges.add(4) # Turkish
if hasLineart:
codepageRanges.add(56) # IBM turkish
elif char == "א":
codepageRanges.add(5) # Hebrew
if hasLineart and "√" in chars:
codepageRanges.add(53) # Hebrew
elif char == "ر":
codepageRanges.add(6) # Arabic
if "√" in chars:
codepageRanges.add(51) # Arabic
if hasLineart:
codepageRanges.add(61) # Arabic; ASMO 708
elif char == "ŗ" and hasAscii:
codepageRanges.add(7) # Windows Baltic
if hasLineart:
codepageRanges.add(59) # MS-DOS Baltic
elif char == "₫" and hasAscii:
codepageRanges.add(8) # Vietnamese
elif char == "ๅ":
codepageRanges.add(16) # Thai
elif char == "エ":
codepageRanges.add(17) # JIS/Japan
elif char == "ㄅ":
codepageRanges.add(18) # Chinese: Simplified chars
elif char == "ㄱ":
codepageRanges.add(19) # Korean wansung
elif char == "央":
codepageRanges.add(20) # Chinese: Traditional chars
elif char == "곴":
codepageRanges.add(21) # Korean Johab
elif char == "♥" and hasAscii:
codepageRanges.add(30) # OEM Character Set
# TODO: Symbol bit has a special meaning (check the spec), we need
# to confirm if this is wanted by default.
# elif unichr(0xF000) <= char <= unichr(0xF0FF):
# codepageRanges.add(31) # Symbol Character Set
elif char == "þ" and hasAscii and hasLineart:
codepageRanges.add(54) # MS-DOS Icelandic
elif char == "╚" and hasAscii:
codepageRanges.add(62) # WE/Latin 1
codepageRanges.add(63) # US
elif hasAscii and hasLineart and "√" in chars:
if char == "Å":
codepageRanges.add(50) # MS-DOS Nordic
elif char == "é":
codepageRanges.add(52) # MS-DOS Canadian French
elif char == "õ":
codepageRanges.add(55) # MS-DOS Portuguese
if hasAscii and "‰" in chars and "∑" in chars:
codepageRanges.add(29) # Macintosh Character Set (US Roman)
# when no codepage ranges can be enabled, fall back to enabling bit 0
# (Latin 1) so that the font works in MS Word:
# https://github.com/googlei18n/fontmake/issues/468
if not codepageRanges:
codepageRanges.add(0)
return codepageRanges |
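
A usage sketch, assuming the function above is loaded in a Python 3 session where unichr is aliased to chr (the original module targets Python 2/3 through such a compatibility shim):

unichr = chr                                     # assumed Python 3 compatibility alias

# Full printable ASCII plus a couple of Latin-1 letters: only bit 0 (Latin 1) is set.
codepoints = set(range(0x20, 0x7F)) | {ord("Þ"), ord("é")}
print(sorted(calcCodePageRanges(codepoints)))    # [0]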
def chi2(g1, g2=None, svdcut=1e-12, nocorr=False):
""" Compute chi**2 of ``g1-g2``.
``chi**2`` is a measure of whether the multi-dimensional
Gaussian distributions ``g1`` and ``g2`` (dictionaries or arrays)
agree with each other --- that is, do their means agree
within errors for corresponding elements. The probability is high
if ``chi2(g1,g2)/chi2.dof`` is of order 1 or smaller.
Usually ``g1`` and ``g2`` are dictionaries with the same keys,
where ``g1[k]`` and ``g2[k]`` are |GVar|\s or arrays of
|GVar|\s having the same shape. Alternatively ``g1`` and ``g2``
can be |GVar|\s, or arrays of |GVar|\s having the same shape.
One of ``g1`` or ``g2`` can contain numbers instead of |GVar|\s,
in which case ``chi**2`` is a measure of the likelihood that
the numbers came from the distribution specified by the other
argument.
One or the other of ``g1`` or ``g2`` can be missing keys, or missing
elements from arrays. Only the parts of ``g1`` and ``g2`` that
overlap are used. Also setting ``g2=None`` is equivalent to replacing its
elements by zeros.
``chi**2`` is computed from the inverse of the covariance matrix
of ``g1-g2``. The matrix inversion can be sensitive to roundoff
errors. In such cases, SVD cuts can be applied by setting
parameters ``svdcut``; see the documentation
for :func:`gvar.svd`, which is used to apply the cut.
The return value is the ``chi**2``. Extra attributes attached to this
value give additional information:
- **dof** --- Number of degrees of freedom (that is, the number of variables
compared).
- **Q** --- The probability that the ``chi**2`` could have been larger,
by chance, even if ``g1`` and ``g2`` agree. Values smaller than 0.1
or so suggest that they do not agree. Also called the *p-value*.
"""
# customized class for answer
class ans(float):
def __new__(cls, chi2, dof, Q):
return float.__new__(cls, chi2)
def __init__(self, chi2, dof, Q):
self.dof = dof
self.Q = Q
self.chi2 = chi2
# leaving nocorr (turn off correlations) undocumented because I
# suspect I will remove it
if g2 is None:
diff = BufferDict(g1).buf if hasattr(g1, 'keys') else numpy.asarray(g1).flatten()
elif hasattr(g1, 'keys') and hasattr(g2, 'keys'):
# g1 and g2 are dictionaries
g1 = BufferDict(g1)
g2 = BufferDict(g2)
diff = BufferDict()
keys = set(g1.keys())
keys = keys.intersection(g2.keys())
for k in keys:
g1k = g1[k]
g2k = g2[k]
shape = tuple(
[min(s1,s2) for s1, s2 in zip(numpy.shape(g1k), numpy.shape(g2k))]
)
diff[k] = numpy.zeros(shape, object)
if len(shape) == 0:
diff[k] = g1k - g2k
else:
for i in numpy.ndindex(shape):
diff[k][i] = g1k[i] - g2k[i]
diff = diff.buf
elif not hasattr(g1, 'keys') and not hasattr(g2, 'keys'):
# g1 and g2 are arrays or scalars
g1 = numpy.asarray(g1)
g2 = numpy.asarray(g2)
shape = tuple(
[min(s1,s2) for s1, s2 in zip(numpy.shape(g1), numpy.shape(g2))]
)
diff = numpy.zeros(shape, object)
if len(shape) == 0:
diff = numpy.array(g1 - g2)
else:
for i in numpy.ndindex(shape):
diff[i] = g1[i] - g2[i]
diff = diff.flatten()
else:
# g1 and g2 are something else
raise ValueError(
'cannot compute chi**2 for types ' + str(type(g1)) + ' ' +
str(type(g2))
)
dof = diff.size
if dof == 0:
return ans(0.0, 0, 0)
if nocorr:
# ignore correlations
chi2 = numpy.sum(mean(diff) ** 2 / var(diff))
dof = len(diff)
else:
diffmod, i_wgts = svd(diff, svdcut=svdcut, wgts=-1)
diffmean = mean(diffmod)
i, wgts = i_wgts[0]
chi2 = 0.0
if len(i) > 0:
chi2 += numpy.sum((diffmean[i] * wgts) ** 2)
for i, wgts in i_wgts[1:]:
chi2 += numpy.sum(wgts.dot(diffmean[i]) ** 2)
dof = sum(len(wgts) for i, wgts in i_wgts)
Q = gammaQ(dof/2., chi2/2.)
return ans(chi2, dof=dof, Q=Q) | Compute chi**2 of ``g1-g2``.
``chi**2`` is a measure of whether the multi-dimensional
Gaussian distributions ``g1`` and ``g2`` (dictionaries or arrays)
agree with each other --- that is, do their means agree
within errors for corresponding elements. The probability is high
if ``chi2(g1,g2)/chi2.dof`` is of order 1 or smaller.
Usually ``g1`` and ``g2`` are dictionaries with the same keys,
where ``g1[k]`` and ``g2[k]`` are |GVar|\s or arrays of
|GVar|\s having the same shape. Alternatively ``g1`` and ``g2``
can be |GVar|\s, or arrays of |GVar|\s having the same shape.
One of ``g1`` or ``g2`` can contain numbers instead of |GVar|\s,
in which case ``chi**2`` is a measure of the likelihood that
the numbers came from the distribution specified by the other
argument.
One or the other of ``g1`` or ``g2`` can be missing keys, or missing
elements from arrays. Only the parts of ``g1`` and ``g2`` that
overlap are used. Also setting ``g2=None`` is equivalent to replacing its
elements by zeros.
``chi**2`` is computed from the inverse of the covariance matrix
of ``g1-g2``. The matrix inversion can be sensitive to roundoff
errors. In such cases, SVD cuts can be applied by setting
    the parameter ``svdcut``; see the documentation
for :func:`gvar.svd`, which is used to apply the cut.
The return value is the ``chi**2``. Extra attributes attached to this
value give additional information:
- **dof** --- Number of degrees of freedom (that is, the number of variables
compared).
- **Q** --- The probability that the ``chi**2`` could have been larger,
by chance, even if ``g1`` and ``g2`` agree. Values smaller than 0.1
or so suggest that they do not agree. Also called the *p-value*. | Below is the the instruction that describes the task:
### Input:
Compute chi**2 of ``g1-g2``.
``chi**2`` is a measure of whether the multi-dimensional
Gaussian distributions ``g1`` and ``g2`` (dictionaries or arrays)
agree with each other --- that is, do their means agree
within errors for corresponding elements. The probability is high
if ``chi2(g1,g2)/chi2.dof`` is of order 1 or smaller.
Usually ``g1`` and ``g2`` are dictionaries with the same keys,
where ``g1[k]`` and ``g2[k]`` are |GVar|\s or arrays of
|GVar|\s having the same shape. Alternatively ``g1`` and ``g2``
can be |GVar|\s, or arrays of |GVar|\s having the same shape.
One of ``g1`` or ``g2`` can contain numbers instead of |GVar|\s,
in which case ``chi**2`` is a measure of the likelihood that
the numbers came from the distribution specified by the other
argument.
One or the other of ``g1`` or ``g2`` can be missing keys, or missing
elements from arrays. Only the parts of ``g1`` and ``g2`` that
overlap are used. Also setting ``g2=None`` is equivalent to replacing its
elements by zeros.
``chi**2`` is computed from the inverse of the covariance matrix
of ``g1-g2``. The matrix inversion can be sensitive to roundoff
errors. In such cases, SVD cuts can be applied by setting
    the parameter ``svdcut``; see the documentation
for :func:`gvar.svd`, which is used to apply the cut.
The return value is the ``chi**2``. Extra attributes attached to this
value give additional information:
- **dof** --- Number of degrees of freedom (that is, the number of variables
compared).
- **Q** --- The probability that the ``chi**2`` could have been larger,
by chance, even if ``g1`` and ``g2`` agree. Values smaller than 0.1
or so suggest that they do not agree. Also called the *p-value*.
### Response:
def chi2(g1, g2=None, svdcut=1e-12, nocorr=False):
""" Compute chi**2 of ``g1-g2``.
``chi**2`` is a measure of whether the multi-dimensional
Gaussian distributions ``g1`` and ``g2`` (dictionaries or arrays)
agree with each other --- that is, do their means agree
within errors for corresponding elements. The probability is high
if ``chi2(g1,g2)/chi2.dof`` is of order 1 or smaller.
Usually ``g1`` and ``g2`` are dictionaries with the same keys,
where ``g1[k]`` and ``g2[k]`` are |GVar|\s or arrays of
|GVar|\s having the same shape. Alternatively ``g1`` and ``g2``
can be |GVar|\s, or arrays of |GVar|\s having the same shape.
One of ``g1`` or ``g2`` can contain numbers instead of |GVar|\s,
in which case ``chi**2`` is a measure of the likelihood that
the numbers came from the distribution specified by the other
argument.
One or the other of ``g1`` or ``g2`` can be missing keys, or missing
elements from arrays. Only the parts of ``g1`` and ``g2`` that
overlap are used. Also setting ``g2=None`` is equivalent to replacing its
elements by zeros.
``chi**2`` is computed from the inverse of the covariance matrix
of ``g1-g2``. The matrix inversion can be sensitive to roundoff
errors. In such cases, SVD cuts can be applied by setting
    the parameter ``svdcut``; see the documentation
for :func:`gvar.svd`, which is used to apply the cut.
The return value is the ``chi**2``. Extra attributes attached to this
value give additional information:
- **dof** --- Number of degrees of freedom (that is, the number of variables
compared).
- **Q** --- The probability that the ``chi**2`` could have been larger,
by chance, even if ``g1`` and ``g2`` agree. Values smaller than 0.1
or so suggest that they do not agree. Also called the *p-value*.
"""
# customized class for answer
class ans(float):
def __new__(cls, chi2, dof, Q):
return float.__new__(cls, chi2)
def __init__(self, chi2, dof, Q):
self.dof = dof
self.Q = Q
self.chi2 = chi2
# leaving nocorr (turn off correlations) undocumented because I
# suspect I will remove it
if g2 is None:
diff = BufferDict(g1).buf if hasattr(g1, 'keys') else numpy.asarray(g1).flatten()
elif hasattr(g1, 'keys') and hasattr(g2, 'keys'):
# g1 and g2 are dictionaries
g1 = BufferDict(g1)
g2 = BufferDict(g2)
diff = BufferDict()
keys = set(g1.keys())
keys = keys.intersection(g2.keys())
for k in keys:
g1k = g1[k]
g2k = g2[k]
shape = tuple(
[min(s1,s2) for s1, s2 in zip(numpy.shape(g1k), numpy.shape(g2k))]
)
diff[k] = numpy.zeros(shape, object)
if len(shape) == 0:
diff[k] = g1k - g2k
else:
for i in numpy.ndindex(shape):
diff[k][i] = g1k[i] - g2k[i]
diff = diff.buf
elif not hasattr(g1, 'keys') and not hasattr(g2, 'keys'):
# g1 and g2 are arrays or scalars
g1 = numpy.asarray(g1)
g2 = numpy.asarray(g2)
shape = tuple(
[min(s1,s2) for s1, s2 in zip(numpy.shape(g1), numpy.shape(g2))]
)
diff = numpy.zeros(shape, object)
if len(shape) == 0:
diff = numpy.array(g1 - g2)
else:
for i in numpy.ndindex(shape):
diff[i] = g1[i] - g2[i]
diff = diff.flatten()
else:
# g1 and g2 are something else
raise ValueError(
'cannot compute chi**2 for types ' + str(type(g1)) + ' ' +
str(type(g2))
)
dof = diff.size
if dof == 0:
return ans(0.0, 0, 0)
if nocorr:
# ignore correlations
chi2 = numpy.sum(mean(diff) ** 2 / var(diff))
dof = len(diff)
else:
diffmod, i_wgts = svd(diff, svdcut=svdcut, wgts=-1)
diffmean = mean(diffmod)
i, wgts = i_wgts[0]
chi2 = 0.0
if len(i) > 0:
chi2 += numpy.sum((diffmean[i] * wgts) ** 2)
for i, wgts in i_wgts[1:]:
chi2 += numpy.sum(wgts.dot(diffmean[i]) ** 2)
dof = sum(len(wgts) for i, wgts in i_wgts)
Q = gammaQ(dof/2., chi2/2.)
return ans(chi2, dof=dof, Q=Q) |
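Usage sketch for the chi2 record above, assuming the public gvar API (gvar.gvar and gvar.chi2) behaves like the function shown; the sample values are illustrative:
import gvar as gv

g1 = gv.gvar(['1.00(10)', '2.00(20)', '3.00(30)'])   # three GVars given as mean(sdev)
g2 = [1.1, 1.9, 3.2]                                 # plain numbers are allowed for one argument
result = gv.chi2(g1, g2)                             # same semantics as the chi2() above
print(result, result.dof, result.Q)                  # chi**2 value, degrees of freedom, p-value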
def smart_search(cls, query_string, search_options=None, extra_query = None):
""" Perform a smart VRF search.
Maps to the function
:py:func:`nipap.backend.Nipap.smart_search_vrf` in the backend.
Please see the documentation for the backend function for
information regarding input arguments and return values.
"""
if search_options is None:
search_options = {}
xmlrpc = XMLRPCConnection()
try:
smart_result = xmlrpc.connection.smart_search_vrf(
{
'query_string': query_string,
'search_options': search_options,
'auth': AuthOptions().options,
'extra_query': extra_query
})
except xmlrpclib.Fault as xml_fault:
raise _fault_to_exception(xml_fault)
result = dict()
result['interpretation'] = smart_result['interpretation']
result['search_options'] = smart_result['search_options']
result['error'] = smart_result['error']
if 'error_message' in smart_result:
result['error_message'] = smart_result['error_message']
result['result'] = list()
for v in smart_result['result']:
result['result'].append(VRF.from_dict(v))
return result | Perform a smart VRF search.
Maps to the function
:py:func:`nipap.backend.Nipap.smart_search_vrf` in the backend.
Please see the documentation for the backend function for
information regarding input arguments and return values. | Below is the the instruction that describes the task:
### Input:
Perform a smart VRF search.
Maps to the function
:py:func:`nipap.backend.Nipap.smart_search_vrf` in the backend.
Please see the documentation for the backend function for
information regarding input arguments and return values.
### Response:
def smart_search(cls, query_string, search_options=None, extra_query = None):
""" Perform a smart VRF search.
Maps to the function
:py:func:`nipap.backend.Nipap.smart_search_vrf` in the backend.
Please see the documentation for the backend function for
information regarding input arguments and return values.
"""
if search_options is None:
search_options = {}
xmlrpc = XMLRPCConnection()
try:
smart_result = xmlrpc.connection.smart_search_vrf(
{
'query_string': query_string,
'search_options': search_options,
'auth': AuthOptions().options,
'extra_query': extra_query
})
except xmlrpclib.Fault as xml_fault:
raise _fault_to_exception(xml_fault)
result = dict()
result['interpretation'] = smart_result['interpretation']
result['search_options'] = smart_result['search_options']
result['error'] = smart_result['error']
if 'error_message' in smart_result:
result['error_message'] = smart_result['error_message']
result['result'] = list()
for v in smart_result['result']:
result['result'].append(VRF.from_dict(v))
return result |
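A hedged usage sketch for the VRF.smart_search record above; the XML-RPC URI, credentials, and query string are placeholders, and the pynipap connection must be configured before the call:
import pynipap
from pynipap import AuthOptions, VRF

pynipap.xmlrpc_uri = 'http://user:[email protected]:1337/XMLRPC'   # placeholder URI
AuthOptions({'authoritative_source': 'example-client'})              # registers auth options once
res = VRF.smart_search('customer-1')                                 # classmethod shown above
for vrf in res['result']:
    print(vrf.rt, vrf.name)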
def log_histograms(self, model: Model, histogram_parameters: Set[str]) -> None:
"""
Send histograms of parameters to tensorboard.
"""
for name, param in model.named_parameters():
if name in histogram_parameters:
self.add_train_histogram("parameter_histogram/" + name, param) | Send histograms of parameters to tensorboard. | Below is the the instruction that describes the task:
### Input:
Send histograms of parameters to tensorboard.
### Response:
def log_histograms(self, model: Model, histogram_parameters: Set[str]) -> None:
"""
Send histograms of parameters to tensorboard.
"""
for name, param in model.named_parameters():
if name in histogram_parameters:
self.add_train_histogram("parameter_histogram/" + name, param) |
def _add_header_domains_xml(self, document):
"""
Generates the XML elements for allowed header domains.
"""
for domain, attrs in self.header_domains.items():
header_element = document.createElement(
'allow-http-request-headers-from'
)
header_element.setAttribute('domain', domain)
header_element.setAttribute('headers', ','.join(attrs['headers']))
if not attrs['secure']:
header_element.setAttribute('secure', 'false')
document.documentElement.appendChild(header_element) | Generates the XML elements for allowed header domains. | Below is the the instruction that describes the task:
### Input:
Generates the XML elements for allowed header domains.
### Response:
def _add_header_domains_xml(self, document):
"""
Generates the XML elements for allowed header domains.
"""
for domain, attrs in self.header_domains.items():
header_element = document.createElement(
'allow-http-request-headers-from'
)
header_element.setAttribute('domain', domain)
header_element.setAttribute('headers', ','.join(attrs['headers']))
if not attrs['secure']:
header_element.setAttribute('secure', 'false')
document.documentElement.appendChild(header_element) |
def filter_useless_pass(source):
"""Yield code with useless "pass" lines removed."""
try:
marked_lines = frozenset(useless_pass_line_numbers(source))
except (SyntaxError, tokenize.TokenError):
marked_lines = frozenset()
sio = io.StringIO(source)
for line_number, line in enumerate(sio.readlines(), start=1):
if line_number not in marked_lines:
yield line | Yield code with useless "pass" lines removed. | Below is the the instruction that describes the task:
### Input:
Yield code with useless "pass" lines removed.
### Response:
def filter_useless_pass(source):
"""Yield code with useless "pass" lines removed."""
try:
marked_lines = frozenset(useless_pass_line_numbers(source))
except (SyntaxError, tokenize.TokenError):
marked_lines = frozenset()
sio = io.StringIO(source)
for line_number, line in enumerate(sio.readlines(), start=1):
if line_number not in marked_lines:
yield line |
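filter_useless_pass is an internal autopep8 filter; from the outside the same cleanup is typically reached through the public fix_code entry point. A hedged sketch (the sample source and the aggressive option are illustrative):
import autopep8

source = (
    'def f():\n'
    '    pass\n'
    '    return 1\n'
)
# fix_code runs autopep8's source filters (including the useless-pass filter) over the input
print(autopep8.fix_code(source, options={'aggressive': 1}))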
def parse(station: str, txt: str) -> (MetarData, Units): # type: ignore
"""
Returns MetarData and Units dataclasses with parsed data and their associated units
"""
core.valid_station(station)
return parse_na(txt) if core.uses_na_format(station[:2]) else parse_in(txt) | Returns MetarData and Units dataclasses with parsed data and their associated units | Below is the the instruction that describes the task:
### Input:
Returns MetarData and Units dataclasses with parsed data and their associated units
### Response:
def parse(station: str, txt: str) -> (MetarData, Units): # type: ignore
"""
Returns MetarData and Units dataclasses with parsed data and their associated units
"""
core.valid_station(station)
return parse_na(txt) if core.uses_na_format(station[:2]) else parse_in(txt) |
def listdir(self, folder_id='0', offset=None, limit=None, fields=None):
'Get Box object, representing list of objects in a folder.'
if fields is not None\
and not isinstance(fields, types.StringTypes): fields = ','.join(fields)
return self(
join('folders', folder_id, 'items'),
dict(offset=offset, limit=limit, fields=fields) ) | Get Box object, representing list of objects in a folder. | Below is the the instruction that describes the task:
### Input:
Get Box object, representing list of objects in a folder.
### Response:
def listdir(self, folder_id='0', offset=None, limit=None, fields=None):
'Get Box object, representing list of objects in a folder.'
if fields is not None\
and not isinstance(fields, types.StringTypes): fields = ','.join(fields)
return self(
join('folders', folder_id, 'items'),
dict(offset=offset, limit=limit, fields=fields) ) |
def get_single_submission_courses(self, user_id, course_id, assignment_id, include=None):
"""
Get a single submission.
Get a single submission, based on user id.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - assignment_id
"""ID"""
path["assignment_id"] = assignment_id
# REQUIRED - PATH - user_id
"""ID"""
path["user_id"] = user_id
# OPTIONAL - include
"""Associations to include with the group."""
if include is not None:
self._validate_enum(include, ["submission_history", "submission_comments", "rubric_assessment", "visibility", "course", "user"])
params["include"] = include
self.logger.debug("GET /api/v1/courses/{course_id}/assignments/{assignment_id}/submissions/{user_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/courses/{course_id}/assignments/{assignment_id}/submissions/{user_id}".format(**path), data=data, params=params, no_data=True) | Get a single submission.
Get a single submission, based on user id. | Below is the the instruction that describes the task:
### Input:
Get a single submission.
Get a single submission, based on user id.
### Response:
def get_single_submission_courses(self, user_id, course_id, assignment_id, include=None):
"""
Get a single submission.
Get a single submission, based on user id.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - assignment_id
"""ID"""
path["assignment_id"] = assignment_id
# REQUIRED - PATH - user_id
"""ID"""
path["user_id"] = user_id
# OPTIONAL - include
"""Associations to include with the group."""
if include is not None:
self._validate_enum(include, ["submission_history", "submission_comments", "rubric_assessment", "visibility", "course", "user"])
params["include"] = include
self.logger.debug("GET /api/v1/courses/{course_id}/assignments/{assignment_id}/submissions/{user_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/courses/{course_id}/assignments/{assignment_id}/submissions/{user_id}".format(**path), data=data, params=params, no_data=True) |
def json(self):
"""Produce a JSON representation for the web interface"""
d = { 'id': self.id, 'format': self.formatclass.__name__,'label': self.label, 'mimetype': self.formatclass.mimetype, 'schema': self.formatclass.schema }
if self.unique:
d['unique'] = True
if self.filename:
d['filename'] = self.filename
if self.extension:
d['extension'] = self.extension
if self.acceptarchive:
d['acceptarchive'] = self.acceptarchive
#d['parameters'] = {}
#The actual parameters are included as XML, and transformed by clam.js using XSLT (parameter.xsl) to generate the forms
parametersxml = ''
for parameter in self.parameters:
parametersxml += parameter.xml()
d['parametersxml'] = '<?xml version="1.0" encoding="utf-8" ?><parameters>' + parametersxml + '</parameters>'
d['converters'] = [ {'id':x.id, 'label':x.label} for x in self.converters ]
d['inputsources'] = [ {'id':x.id, 'label':x.label} for x in self.inputsources ]
return json.dumps(d) | Produce a JSON representation for the web interface | Below is the the instruction that describes the task:
### Input:
Produce a JSON representation for the web interface
### Response:
def json(self):
"""Produce a JSON representation for the web interface"""
d = { 'id': self.id, 'format': self.formatclass.__name__,'label': self.label, 'mimetype': self.formatclass.mimetype, 'schema': self.formatclass.schema }
if self.unique:
d['unique'] = True
if self.filename:
d['filename'] = self.filename
if self.extension:
d['extension'] = self.extension
if self.acceptarchive:
d['acceptarchive'] = self.acceptarchive
#d['parameters'] = {}
#The actual parameters are included as XML, and transformed by clam.js using XSLT (parameter.xsl) to generate the forms
parametersxml = ''
for parameter in self.parameters:
parametersxml += parameter.xml()
d['parametersxml'] = '<?xml version="1.0" encoding="utf-8" ?><parameters>' + parametersxml + '</parameters>'
d['converters'] = [ {'id':x.id, 'label':x.label} for x in self.converters ]
d['inputsources'] = [ {'id':x.id, 'label':x.label} for x in self.inputsources ]
return json.dumps(d) |
def clean_path(p):
""" Clean a path by expanding user and environment variables and
ensuring absolute path.
"""
p = os.path.expanduser(p)
p = os.path.expandvars(p)
p = os.path.abspath(p)
return p | Clean a path by expanding user and environment variables and
ensuring absolute path. | Below is the the instruction that describes the task:
### Input:
Clean a path by expanding user and environment variables and
ensuring absolute path.
### Response:
def clean_path(p):
""" Clean a path by expanding user and environment variables and
ensuring absolute path.
"""
p = os.path.expanduser(p)
p = os.path.expandvars(p)
p = os.path.abspath(p)
return p |
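Since clean_path only chains os.path helpers, a self-contained equivalent is easy to check; the sample path is illustrative:
import os

def clean_path(p):
    # expand ~ and $VARS, then make the path absolute, mirroring the record above
    return os.path.abspath(os.path.expandvars(os.path.expanduser(p)))

print(clean_path('~/projects/../data'))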
def move(self, x = None, y = None, width = None, height = None,
bRepaint = True):
"""
Moves and/or resizes the window.
        @note: This request is performed synchronously.
@type x: int
@param x: (Optional) New horizontal coordinate.
@type y: int
@param y: (Optional) New vertical coordinate.
@type width: int
@param width: (Optional) Desired window width.
@type height: int
@param height: (Optional) Desired window height.
@type bRepaint: bool
@param bRepaint:
(Optional) C{True} if the window should be redrawn afterwards.
        @raise WindowsError: An error occurred while processing this request.
"""
if None in (x, y, width, height):
rect = self.get_screen_rect()
if x is None:
x = rect.left
if y is None:
y = rect.top
if width is None:
width = rect.right - rect.left
if height is None:
height = rect.bottom - rect.top
win32.MoveWindow(self.get_handle(), x, y, width, height, bRepaint) | Moves and/or resizes the window.
        @note: This request is performed synchronously.
@type x: int
@param x: (Optional) New horizontal coordinate.
@type y: int
@param y: (Optional) New vertical coordinate.
@type width: int
@param width: (Optional) Desired window width.
@type height: int
@param height: (Optional) Desired window height.
@type bRepaint: bool
@param bRepaint:
(Optional) C{True} if the window should be redrawn afterwards.
        @raise WindowsError: An error occurred while processing this request. | Below is the the instruction that describes the task:
### Input:
Moves and/or resizes the window.
        @note: This request is performed synchronously.
@type x: int
@param x: (Optional) New horizontal coordinate.
@type y: int
@param y: (Optional) New vertical coordinate.
@type width: int
@param width: (Optional) Desired window width.
@type height: int
@param height: (Optional) Desired window height.
@type bRepaint: bool
@param bRepaint:
(Optional) C{True} if the window should be redrawn afterwards.
        @raise WindowsError: An error occurred while processing this request.
### Response:
def move(self, x = None, y = None, width = None, height = None,
bRepaint = True):
"""
Moves and/or resizes the window.
        @note: This request is performed synchronously.
@type x: int
@param x: (Optional) New horizontal coordinate.
@type y: int
@param y: (Optional) New vertical coordinate.
@type width: int
@param width: (Optional) Desired window width.
@type height: int
@param height: (Optional) Desired window height.
@type bRepaint: bool
@param bRepaint:
(Optional) C{True} if the window should be redrawn afterwards.
        @raise WindowsError: An error occurred while processing this request.
"""
if None in (x, y, width, height):
rect = self.get_screen_rect()
if x is None:
x = rect.left
if y is None:
y = rect.top
if width is None:
width = rect.right - rect.left
if height is None:
height = rect.bottom - rect.top
win32.MoveWindow(self.get_handle(), x, y, width, height, bRepaint) |
def bandpass(data, freqmin, freqmax, df, corners=4, zerophase=True):
"""
Butterworth-Bandpass Filter.
Filter data from ``freqmin`` to ``freqmax`` using ``corners``
corners.
The filter uses :func:`scipy.signal.iirfilter` (for design)
and :func:`scipy.signal.sosfilt` (for applying the filter).
:type data: numpy.ndarray
:param data: Data to filter.
:param freqmin: Pass band low corner frequency.
:param freqmax: Pass band high corner frequency.
:param df: Sampling rate in Hz.
:param corners: Filter corners / order.
:param zerophase: If True, apply filter once forwards and once backwards.
This results in twice the filter order but zero phase shift in
the resulting filtered trace.
:return: Filtered data.
"""
fe = 0.5 * df
low = freqmin / fe
high = freqmax / fe
# raise for some bad scenarios
if high - 1.0 > -1e-6:
msg = ("Selected high corner frequency ({}) of bandpass is at or "
"above Nyquist ({}). Applying a high-pass instead.").format(
freqmax, fe)
warnings.warn(msg)
return highpass(data, freq=freqmin, df=df, corners=corners,
zerophase=zerophase)
if low > 1:
msg = "Selected low corner frequency is above Nyquist."
raise ValueError(msg)
z, p, k = iirfilter(corners, [low, high], btype='band',
ftype='butter', output='zpk')
sos = zpk2sos(z, p, k)
if zerophase:
firstpass = sosfilt(sos, data)
return sosfilt(sos, firstpass[::-1])[::-1]
else:
return sosfilt(sos, data) | Butterworth-Bandpass Filter.
Filter data from ``freqmin`` to ``freqmax`` using ``corners``
corners.
The filter uses :func:`scipy.signal.iirfilter` (for design)
and :func:`scipy.signal.sosfilt` (for applying the filter).
:type data: numpy.ndarray
:param data: Data to filter.
:param freqmin: Pass band low corner frequency.
:param freqmax: Pass band high corner frequency.
:param df: Sampling rate in Hz.
:param corners: Filter corners / order.
:param zerophase: If True, apply filter once forwards and once backwards.
This results in twice the filter order but zero phase shift in
the resulting filtered trace.
:return: Filtered data. | Below is the the instruction that describes the task:
### Input:
Butterworth-Bandpass Filter.
Filter data from ``freqmin`` to ``freqmax`` using ``corners``
corners.
The filter uses :func:`scipy.signal.iirfilter` (for design)
and :func:`scipy.signal.sosfilt` (for applying the filter).
:type data: numpy.ndarray
:param data: Data to filter.
:param freqmin: Pass band low corner frequency.
:param freqmax: Pass band high corner frequency.
:param df: Sampling rate in Hz.
:param corners: Filter corners / order.
:param zerophase: If True, apply filter once forwards and once backwards.
This results in twice the filter order but zero phase shift in
the resulting filtered trace.
:return: Filtered data.
### Response:
def bandpass(data, freqmin, freqmax, df, corners=4, zerophase=True):
"""
Butterworth-Bandpass Filter.
Filter data from ``freqmin`` to ``freqmax`` using ``corners``
corners.
The filter uses :func:`scipy.signal.iirfilter` (for design)
and :func:`scipy.signal.sosfilt` (for applying the filter).
:type data: numpy.ndarray
:param data: Data to filter.
:param freqmin: Pass band low corner frequency.
:param freqmax: Pass band high corner frequency.
:param df: Sampling rate in Hz.
:param corners: Filter corners / order.
:param zerophase: If True, apply filter once forwards and once backwards.
This results in twice the filter order but zero phase shift in
the resulting filtered trace.
:return: Filtered data.
"""
fe = 0.5 * df
low = freqmin / fe
high = freqmax / fe
# raise for some bad scenarios
if high - 1.0 > -1e-6:
msg = ("Selected high corner frequency ({}) of bandpass is at or "
"above Nyquist ({}). Applying a high-pass instead.").format(
freqmax, fe)
warnings.warn(msg)
return highpass(data, freq=freqmin, df=df, corners=corners,
zerophase=zerophase)
if low > 1:
msg = "Selected low corner frequency is above Nyquist."
raise ValueError(msg)
z, p, k = iirfilter(corners, [low, high], btype='band',
ftype='butter', output='zpk')
sos = zpk2sos(z, p, k)
if zerophase:
firstpass = sosfilt(sos, data)
return sosfilt(sos, firstpass[::-1])[::-1]
else:
return sosfilt(sos, data) |
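A self-contained sketch that exercises the same scipy.signal pipeline as the bandpass record above (iirfilter -> zpk2sos -> sosfilt); the test signal and corner frequencies are illustrative:
import numpy as np
from scipy.signal import iirfilter, zpk2sos, sosfilt

df = 200.0                                                       # sampling rate in Hz
t = np.arange(0.0, 2.0, 1.0 / df)
data = np.sin(2 * np.pi * 5 * t) + np.sin(2 * np.pi * 60 * t)    # keep 5 Hz, reject 60 Hz
fe = 0.5 * df
z, p, k = iirfilter(4, [1.0 / fe, 10.0 / fe], btype='band', ftype='butter', output='zpk')
sos = zpk2sos(z, p, k)
firstpass = sosfilt(sos, data)
filtered = sosfilt(sos, firstpass[::-1])[::-1]                   # zero-phase, as in the record above
print(filtered[:5])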
def alias(self, v, stat='dummy'):
"""Return a representation of a value suitable for use in historical queries.
It will behave much as if you assigned the value to some entity and then used its
``historical`` method to get a reference to the set of its past values, which
happens to contain only the value you've provided here, ``v``.
:arg v: the value to represent
:arg stat: what name to pretend its stat has; usually irrelevant
"""
from .util import EntityStatAccessor
r = DummyEntity(self)
r[stat] = v
return EntityStatAccessor(r, stat, engine=self) | Return a representation of a value suitable for use in historical queries.
It will behave much as if you assigned the value to some entity and then used its
``historical`` method to get a reference to the set of its past values, which
happens to contain only the value you've provided here, ``v``.
:arg v: the value to represent
:arg stat: what name to pretend its stat has; usually irrelevant | Below is the the instruction that describes the task:
### Input:
Return a representation of a value suitable for use in historical queries.
It will behave much as if you assigned the value to some entity and then used its
``historical`` method to get a reference to the set of its past values, which
happens to contain only the value you've provided here, ``v``.
:arg v: the value to represent
:arg stat: what name to pretend its stat has; usually irrelevant
### Response:
def alias(self, v, stat='dummy'):
"""Return a representation of a value suitable for use in historical queries.
It will behave much as if you assigned the value to some entity and then used its
``historical`` method to get a reference to the set of its past values, which
happens to contain only the value you've provided here, ``v``.
:arg v: the value to represent
:arg stat: what name to pretend its stat has; usually irrelevant
"""
from .util import EntityStatAccessor
r = DummyEntity(self)
r[stat] = v
return EntityStatAccessor(r, stat, engine=self) |
def freqpoly_plot(data):
"""make freqpoly plot of merged read lengths"""
rel_data = OrderedDict()
for key, val in data.items():
tot = sum(val.values(), 0)
rel_data[key] = {k: v / tot for k, v in val.items()}
fplotconfig = {
'data_labels': [
{'name': 'Absolute', 'ylab': 'Frequency', 'xlab': 'Merged Read Length'},
{'name': 'Relative', 'ylab': 'Relative Frequency', 'xlab': 'Merged Read Length'}
],
'id': 'flash_freqpoly_plot', 'title': 'FLASh: Frequency of merged read lengths',
'colors': dict(zip(data.keys(), MultiqcModule.get_colors(len(data))))
}
return linegraph.plot([data, rel_data], fplotconfig) | make freqpoly plot of merged read lengths | Below is the the instruction that describes the task:
### Input:
make freqpoly plot of merged read lengths
### Response:
def freqpoly_plot(data):
"""make freqpoly plot of merged read lengths"""
rel_data = OrderedDict()
for key, val in data.items():
tot = sum(val.values(), 0)
rel_data[key] = {k: v / tot for k, v in val.items()}
fplotconfig = {
'data_labels': [
{'name': 'Absolute', 'ylab': 'Frequency', 'xlab': 'Merged Read Length'},
{'name': 'Relative', 'ylab': 'Relative Frequency', 'xlab': 'Merged Read Length'}
],
'id': 'flash_freqpoly_plot', 'title': 'FLASh: Frequency of merged read lengths',
'colors': dict(zip(data.keys(), MultiqcModule.get_colors(len(data))))
}
return linegraph.plot([data, rel_data], fplotconfig) |
def _restore(self, value):
"""Restores the state copied with _copy()"""
items, subs = value
self.clear()
for key, value in items:
self[key] = value
if key in subs:
value.sub_frames._restore(subs[key]) | Restores the state copied with _copy() | Below is the the instruction that describes the task:
### Input:
Restores the state copied with _copy()
### Response:
def _restore(self, value):
"""Restores the state copied with _copy()"""
items, subs = value
self.clear()
for key, value in items:
self[key] = value
if key in subs:
value.sub_frames._restore(subs[key]) |
def quaternion(vector, angle):
"""
Unit quaternion for a vector and an angle
"""
return N.cos(angle/2)+vector*N.sin(angle/2) | Unit quaternion for a vector and an angle | Below is the the instruction that describes the task:
### Input:
Unit quaternion for a vector and an angle
### Response:
def quaternion(vector, angle):
"""
Unit quaternion for a vector and an angle
"""
return N.cos(angle/2)+vector*N.sin(angle/2) |
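The quaternion record above leaves the vector type abstract; a numpy sketch of the same half-angle construction, storing the result as [w, x, y, z] (purely illustrative):
import numpy as np

def quaternion_wxyz(vector, angle):
    # cos(angle/2) + vector*sin(angle/2), with the scalar part stored first
    v = np.asarray(vector, dtype=float)
    v = v / np.linalg.norm(v)                    # assume a unit rotation axis
    return np.concatenate(([np.cos(angle / 2.0)], v * np.sin(angle / 2.0)))

print(quaternion_wxyz([0.0, 0.0, 1.0], np.pi / 2))   # 90-degree rotation about z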
def setup_callsite(self, state, ret_addr, args, stack_base=None, alloc_base=None, grow_like_stack=True):
"""
This function performs the actions of the caller getting ready to jump into a function.
:param state: The SimState to operate on
:param ret_addr: The address to return to when the called function finishes
        :param args: The list of arguments that the called function will see
:param stack_base: An optional pointer to use as the top of the stack, circa the function entry point
:param alloc_base: An optional pointer to use as the place to put excess argument data
:param grow_like_stack: When allocating data at alloc_base, whether to allocate at decreasing addresses
The idea here is that you can provide almost any kind of python type in `args` and it'll be translated to a
binary format to be placed into simulated memory. Lists (representing arrays) must be entirely elements of the
same type and size, while tuples (representing structs) can be elements of any type and size.
If you'd like there to be a pointer to a given value, wrap the value in a `PointerWrapper`. Any value
that can't fit in a register will be automatically put in a PointerWrapper.
If stack_base is not provided, the current stack pointer will be used, and it will be updated.
If alloc_base is not provided, the stack base will be used and grow_like_stack will implicitly be True.
grow_like_stack controls the behavior of allocating data at alloc_base. When data from args needs to be wrapped
in a pointer, the pointer needs to point somewhere, so that data is dumped into memory at alloc_base. If you
set alloc_base to point to somewhere other than the stack, set grow_like_stack to False so that sequential
allocations happen at increasing addresses.
"""
# STEP 0: clerical work
if isinstance(self, SimCCSoot):
SimEngineSoot.setup_callsite(state, args, ret_addr)
return
allocator = AllocHelper(self.arch.bits, self.arch.memory_endness == 'Iend_LE')
#
# STEP 1: convert all values into serialized form
# this entails creating the vals list of simple values to store and also populating the allocator's
# understanding of what aux data needs to be stored
# This is also where we compute arg locations (arg_locs)
#
if self.func_ty is not None:
vals = [self._standardize_value(arg, ty, state, allocator.dump) for arg, ty in zip(args, self.func_ty.args)]
else:
vals = [self._standardize_value(arg, None, state, allocator.dump) for arg in args]
arg_session = self.arg_session
arg_locs = [None]*len(args)
for i, (arg, val) in enumerate(zip(args, vals)):
if self.is_fp_value(arg) or \
(self.func_ty is not None and isinstance(self.func_ty.args[i], SimTypeFloat)):
arg_locs[i] = arg_session.next_arg(is_fp=True, size=val.length // state.arch.byte_width)
continue
if val.length > state.arch.bits or (self.func_ty is None and isinstance(arg, (bytes, str, list, tuple))):
vals[i] = allocator.dump(val, state)
elif val.length < state.arch.bits:
if self.arch.memory_endness == 'Iend_LE':
vals[i] = val.concat(claripy.BVV(0, state.arch.bits - val.length))
else:
vals[i] = claripy.BVV(0, state.arch.bits - val.length).concat(val)
arg_locs[i] = arg_session.next_arg(is_fp=False, size=vals[i].length // state.arch.byte_width)
#
# STEP 2: decide on memory storage locations
# implement the contract for stack_base/alloc_base/grow_like_stack
# after this, stack_base should be the final stack pointer, alloc_base should be the final aux storage location,
# and the stack pointer should be updated
#
if stack_base is None:
if alloc_base is None:
alloc_size = allocator.size()
state.regs.sp -= alloc_size
alloc_base = state.regs.sp
grow_like_stack = False
state.regs.sp -= self.stack_space(arg_locs)
# handle alignment
alignment = (state.regs.sp + self.STACKARG_SP_DIFF) % self.STACK_ALIGNMENT
state.regs.sp -= alignment
else:
state.regs.sp = stack_base
if alloc_base is None:
alloc_base = stack_base + self.stack_space(arg_locs)
grow_like_stack = False
if grow_like_stack:
alloc_base -= allocator.size()
if type(alloc_base) is int:
alloc_base = claripy.BVV(alloc_base, state.arch.bits)
for i, val in enumerate(vals):
vals[i] = allocator.translate(val, alloc_base)
#
# STEP 3: store everything!
#
allocator.apply(state, alloc_base)
for loc, val in zip(arg_locs, vals):
if val.length > loc.size * 8:
raise ValueError("Can't fit value {} into location {}".format(repr(val), repr(loc)))
loc.set_value(state, val, endness='Iend_BE', stack_base=stack_base)
self.return_addr.set_value(state, ret_addr, stack_base=stack_base) | This function performs the actions of the caller getting ready to jump into a function.
:param state: The SimState to operate on
:param ret_addr: The address to return to when the called function finishes
        :param args: The list of arguments that the called function will see
:param stack_base: An optional pointer to use as the top of the stack, circa the function entry point
:param alloc_base: An optional pointer to use as the place to put excess argument data
:param grow_like_stack: When allocating data at alloc_base, whether to allocate at decreasing addresses
The idea here is that you can provide almost any kind of python type in `args` and it'll be translated to a
binary format to be placed into simulated memory. Lists (representing arrays) must be entirely elements of the
same type and size, while tuples (representing structs) can be elements of any type and size.
If you'd like there to be a pointer to a given value, wrap the value in a `PointerWrapper`. Any value
that can't fit in a register will be automatically put in a PointerWrapper.
If stack_base is not provided, the current stack pointer will be used, and it will be updated.
If alloc_base is not provided, the stack base will be used and grow_like_stack will implicitly be True.
grow_like_stack controls the behavior of allocating data at alloc_base. When data from args needs to be wrapped
in a pointer, the pointer needs to point somewhere, so that data is dumped into memory at alloc_base. If you
set alloc_base to point to somewhere other than the stack, set grow_like_stack to False so that sequential
allocations happen at increasing addresses. | Below is the the instruction that describes the task:
### Input:
This function performs the actions of the caller getting ready to jump into a function.
:param state: The SimState to operate on
:param ret_addr: The address to return to when the called function finishes
        :param args: The list of arguments that the called function will see
:param stack_base: An optional pointer to use as the top of the stack, circa the function entry point
:param alloc_base: An optional pointer to use as the place to put excess argument data
:param grow_like_stack: When allocating data at alloc_base, whether to allocate at decreasing addresses
The idea here is that you can provide almost any kind of python type in `args` and it'll be translated to a
binary format to be placed into simulated memory. Lists (representing arrays) must be entirely elements of the
same type and size, while tuples (representing structs) can be elements of any type and size.
If you'd like there to be a pointer to a given value, wrap the value in a `PointerWrapper`. Any value
that can't fit in a register will be automatically put in a PointerWrapper.
If stack_base is not provided, the current stack pointer will be used, and it will be updated.
If alloc_base is not provided, the stack base will be used and grow_like_stack will implicitly be True.
grow_like_stack controls the behavior of allocating data at alloc_base. When data from args needs to be wrapped
in a pointer, the pointer needs to point somewhere, so that data is dumped into memory at alloc_base. If you
set alloc_base to point to somewhere other than the stack, set grow_like_stack to False so that sequential
allocations happen at increasing addresses.
### Response:
def setup_callsite(self, state, ret_addr, args, stack_base=None, alloc_base=None, grow_like_stack=True):
"""
This function performs the actions of the caller getting ready to jump into a function.
:param state: The SimState to operate on
:param ret_addr: The address to return to when the called function finishes
        :param args: The list of arguments that the called function will see
:param stack_base: An optional pointer to use as the top of the stack, circa the function entry point
:param alloc_base: An optional pointer to use as the place to put excess argument data
:param grow_like_stack: When allocating data at alloc_base, whether to allocate at decreasing addresses
The idea here is that you can provide almost any kind of python type in `args` and it'll be translated to a
binary format to be placed into simulated memory. Lists (representing arrays) must be entirely elements of the
same type and size, while tuples (representing structs) can be elements of any type and size.
If you'd like there to be a pointer to a given value, wrap the value in a `PointerWrapper`. Any value
that can't fit in a register will be automatically put in a PointerWrapper.
If stack_base is not provided, the current stack pointer will be used, and it will be updated.
If alloc_base is not provided, the stack base will be used and grow_like_stack will implicitly be True.
grow_like_stack controls the behavior of allocating data at alloc_base. When data from args needs to be wrapped
in a pointer, the pointer needs to point somewhere, so that data is dumped into memory at alloc_base. If you
set alloc_base to point to somewhere other than the stack, set grow_like_stack to False so that sequential
allocations happen at increasing addresses.
"""
# STEP 0: clerical work
if isinstance(self, SimCCSoot):
SimEngineSoot.setup_callsite(state, args, ret_addr)
return
allocator = AllocHelper(self.arch.bits, self.arch.memory_endness == 'Iend_LE')
#
# STEP 1: convert all values into serialized form
# this entails creating the vals list of simple values to store and also populating the allocator's
# understanding of what aux data needs to be stored
# This is also where we compute arg locations (arg_locs)
#
if self.func_ty is not None:
vals = [self._standardize_value(arg, ty, state, allocator.dump) for arg, ty in zip(args, self.func_ty.args)]
else:
vals = [self._standardize_value(arg, None, state, allocator.dump) for arg in args]
arg_session = self.arg_session
arg_locs = [None]*len(args)
for i, (arg, val) in enumerate(zip(args, vals)):
if self.is_fp_value(arg) or \
(self.func_ty is not None and isinstance(self.func_ty.args[i], SimTypeFloat)):
arg_locs[i] = arg_session.next_arg(is_fp=True, size=val.length // state.arch.byte_width)
continue
if val.length > state.arch.bits or (self.func_ty is None and isinstance(arg, (bytes, str, list, tuple))):
vals[i] = allocator.dump(val, state)
elif val.length < state.arch.bits:
if self.arch.memory_endness == 'Iend_LE':
vals[i] = val.concat(claripy.BVV(0, state.arch.bits - val.length))
else:
vals[i] = claripy.BVV(0, state.arch.bits - val.length).concat(val)
arg_locs[i] = arg_session.next_arg(is_fp=False, size=vals[i].length // state.arch.byte_width)
#
# STEP 2: decide on memory storage locations
# implement the contract for stack_base/alloc_base/grow_like_stack
# after this, stack_base should be the final stack pointer, alloc_base should be the final aux storage location,
# and the stack pointer should be updated
#
if stack_base is None:
if alloc_base is None:
alloc_size = allocator.size()
state.regs.sp -= alloc_size
alloc_base = state.regs.sp
grow_like_stack = False
state.regs.sp -= self.stack_space(arg_locs)
# handle alignment
alignment = (state.regs.sp + self.STACKARG_SP_DIFF) % self.STACK_ALIGNMENT
state.regs.sp -= alignment
else:
state.regs.sp = stack_base
if alloc_base is None:
alloc_base = stack_base + self.stack_space(arg_locs)
grow_like_stack = False
if grow_like_stack:
alloc_base -= allocator.size()
if type(alloc_base) is int:
alloc_base = claripy.BVV(alloc_base, state.arch.bits)
for i, val in enumerate(vals):
vals[i] = allocator.translate(val, alloc_base)
#
# STEP 3: store everything!
#
allocator.apply(state, alloc_base)
for loc, val in zip(arg_locs, vals):
if val.length > loc.size * 8:
raise ValueError("Can't fit value {} into location {}".format(repr(val), repr(loc)))
loc.set_value(state, val, endness='Iend_BE', stack_base=stack_base)
self.return_addr.set_value(state, ret_addr, stack_base=stack_base) |
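A hedged sketch of driving a calling-convention object with the setup_callsite shown above from angr; the binary path, addresses, and argument values are placeholders, and the call signature follows the version reproduced in this record:
import angr
import claripy

proj = angr.Project('/path/to/binary', auto_load_libs=False)   # placeholder path
state = proj.factory.blank_state(addr=0x400000)                # placeholder entry point
cc = proj.factory.cc()                                         # default SimCC for the architecture
args = [claripy.BVV(1, proj.arch.bits), b'hello\x00']          # ints/bitvectors and bytes are accepted
cc.setup_callsite(state, 0xdeadbeef, args)                     # ret_addr is a placeholder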
def nse(sim=None, obs=None, node=None, skip_nan=False):
"""Calculate the efficiency criteria after Nash & Sutcliffe.
If the simulated values predict the observed values as well
    as the average observed value (regarding the mean square
error), the NSE value is zero:
>>> from hydpy import nse
>>> nse(sim=[2.0, 2.0, 2.0], obs=[1.0, 2.0, 3.0])
0.0
>>> nse(sim=[0.0, 2.0, 4.0], obs=[1.0, 2.0, 3.0])
0.0
For worse and better simulated values the NSE is negative
or positive, respectively:
>>> nse(sim=[3.0, 2.0, 1.0], obs=[1.0, 2.0, 3.0])
-3.0
>>> nse(sim=[1.0, 2.0, 2.0], obs=[1.0, 2.0, 3.0])
0.5
The highest possible value is one:
>>> nse(sim=[1.0, 2.0, 3.0], obs=[1.0, 2.0, 3.0])
1.0
See the documentation on function |prepare_arrays| for some
additional instructions for use of function |nse|.
"""
sim, obs = prepare_arrays(sim, obs, node, skip_nan)
return 1.-numpy.sum((sim-obs)**2)/numpy.sum((obs-numpy.mean(obs))**2) | Calculate the efficiency criteria after Nash & Sutcliffe.
If the simulated values predict the observed values as well
    as the average observed value (regarding the mean square
error), the NSE value is zero:
>>> from hydpy import nse
>>> nse(sim=[2.0, 2.0, 2.0], obs=[1.0, 2.0, 3.0])
0.0
>>> nse(sim=[0.0, 2.0, 4.0], obs=[1.0, 2.0, 3.0])
0.0
For worse and better simulated values the NSE is negative
or positive, respectively:
>>> nse(sim=[3.0, 2.0, 1.0], obs=[1.0, 2.0, 3.0])
-3.0
>>> nse(sim=[1.0, 2.0, 2.0], obs=[1.0, 2.0, 3.0])
0.5
The highest possible value is one:
>>> nse(sim=[1.0, 2.0, 3.0], obs=[1.0, 2.0, 3.0])
1.0
See the documentation on function |prepare_arrays| for some
additional instructions for use of function |nse|. | Below is the the instruction that describes the task:
### Input:
Calculate the efficiency criteria after Nash & Sutcliffe.
If the simulated values predict the observed values as well
    as the average observed value (regarding the mean square
error), the NSE value is zero:
>>> from hydpy import nse
>>> nse(sim=[2.0, 2.0, 2.0], obs=[1.0, 2.0, 3.0])
0.0
>>> nse(sim=[0.0, 2.0, 4.0], obs=[1.0, 2.0, 3.0])
0.0
For worse and better simulated values the NSE is negative
or positive, respectively:
>>> nse(sim=[3.0, 2.0, 1.0], obs=[1.0, 2.0, 3.0])
-3.0
>>> nse(sim=[1.0, 2.0, 2.0], obs=[1.0, 2.0, 3.0])
0.5
The highest possible value is one:
>>> nse(sim=[1.0, 2.0, 3.0], obs=[1.0, 2.0, 3.0])
1.0
See the documentation on function |prepare_arrays| for some
additional instructions for use of function |nse|.
### Response:
def nse(sim=None, obs=None, node=None, skip_nan=False):
"""Calculate the efficiency criteria after Nash & Sutcliffe.
If the simulated values predict the observed values as well
    as the average observed value (regarding the mean square
error), the NSE value is zero:
>>> from hydpy import nse
>>> nse(sim=[2.0, 2.0, 2.0], obs=[1.0, 2.0, 3.0])
0.0
>>> nse(sim=[0.0, 2.0, 4.0], obs=[1.0, 2.0, 3.0])
0.0
For worse and better simulated values the NSE is negative
or positive, respectively:
>>> nse(sim=[3.0, 2.0, 1.0], obs=[1.0, 2.0, 3.0])
-3.0
>>> nse(sim=[1.0, 2.0, 2.0], obs=[1.0, 2.0, 3.0])
0.5
The highest possible value is one:
>>> nse(sim=[1.0, 2.0, 3.0], obs=[1.0, 2.0, 3.0])
1.0
See the documentation on function |prepare_arrays| for some
additional instructions for use of function |nse|.
"""
sim, obs = prepare_arrays(sim, obs, node, skip_nan)
return 1.-numpy.sum((sim-obs)**2)/numpy.sum((obs-numpy.mean(obs))**2) |
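The doctests in the nse record already give expected values; a standalone numpy check of the same formula (without hydpy's array preparation) reproduces them:
import numpy as np

def nse_plain(sim, obs):
    sim, obs = np.asarray(sim, dtype=float), np.asarray(obs, dtype=float)
    return 1.0 - np.sum((sim - obs) ** 2) / np.sum((obs - obs.mean()) ** 2)

print(nse_plain([2.0, 2.0, 2.0], [1.0, 2.0, 3.0]))   # 0.0
print(nse_plain([1.0, 2.0, 2.0], [1.0, 2.0, 3.0]))   # 0.5
print(nse_plain([1.0, 2.0, 3.0], [1.0, 2.0, 3.0]))   # 1.0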
def save_ipv6(self, ip6, id_equip, descricao, id_net):
"""
Save an IP6 and associate with equipment
:param ip6: An IP6 available to save in format xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx.
:param id_equip: Equipment identifier. Integer value and greater than zero.
:param descricao: IPv6 description.
:param id_net: Network identifier. Integer value and greater than zero.
:return: Dictionary with the following structure:
::
{'ipv6': {'id': < id >,
'block1': <block1>,
'block2': <block2>,
'block3': <block3>,
'block4': <block4>,
'block5': <block5>,
'block6': <block6>,
'block7': <block7>,
'block8': <block8>,
'descricao': < description >,
'equipamento': [ { all name equipamentos related } ], }}
"""
if not is_valid_int_param(id_net):
raise InvalidParameterError(
u'Network identifier is invalid or was not informed.')
if not is_valid_int_param(id_equip):
raise InvalidParameterError(
u'Equipment identifier is invalid or was not informed.')
if ip6 is None or ip6 == "":
raise InvalidParameterError(
u'IPv6 is invalid or was not informed.')
ip_map = dict()
ip_map['id_net'] = id_net
ip_map['descricao'] = descricao
ip_map['ip6'] = ip6
ip_map['id_equip'] = id_equip
url = "ipv6/save/"
code, xml = self.submit({'ip_map': ip_map}, 'POST', url)
return self.response(code, xml) | Save an IP6 and associate with equipment
:param ip6: An IP6 available to save in format xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx.
:param id_equip: Equipment identifier. Integer value and greater than zero.
:param descricao: IPv6 description.
:param id_net: Network identifier. Integer value and greater than zero.
:return: Dictionary with the following structure:
::
{'ipv6': {'id': < id >,
'block1': <block1>,
'block2': <block2>,
'block3': <block3>,
'block4': <block4>,
'block5': <block5>,
'block6': <block6>,
'block7': <block7>,
'block8': <block8>,
'descricao': < description >,
'equipamento': [ { all name equipamentos related } ], }} | Below is the the instruction that describes the task:
### Input:
Save an IP6 and associate with equipment
:param ip6: An IP6 available to save in format xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx.
:param id_equip: Equipment identifier. Integer value and greater than zero.
:param descricao: IPv6 description.
:param id_net: Network identifier. Integer value and greater than zero.
:return: Dictionary with the following structure:
::
{'ipv6': {'id': < id >,
'block1': <block1>,
'block2': <block2>,
'block3': <block3>,
'block4': <block4>,
'block5': <block5>,
'block6': <block6>,
'block7': <block7>,
'block8': <block8>,
'descricao': < description >,
'equipamento': [ { all name equipamentos related } ], }}
### Response:
def save_ipv6(self, ip6, id_equip, descricao, id_net):
"""
Save an IP6 and associate with equipment
:param ip6: An IP6 available to save in format xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx.
:param id_equip: Equipment identifier. Integer value and greater than zero.
:param descricao: IPv6 description.
:param id_net: Network identifier. Integer value and greater than zero.
:return: Dictionary with the following structure:
::
{'ipv6': {'id': < id >,
'block1': <block1>,
'block2': <block2>,
'block3': <block3>,
'block4': <block4>,
'block5': <block5>,
'block6': <block6>,
'block7': <block7>,
'block8': <block8>,
'descricao': < description >,
'equipamento': [ { all name equipamentos related } ], }}
"""
if not is_valid_int_param(id_net):
raise InvalidParameterError(
u'Network identifier is invalid or was not informed.')
if not is_valid_int_param(id_equip):
raise InvalidParameterError(
u'Equipment identifier is invalid or was not informed.')
if ip6 is None or ip6 == "":
raise InvalidParameterError(
u'IPv6 is invalid or was not informed.')
ip_map = dict()
ip_map['id_net'] = id_net
ip_map['descricao'] = descricao
ip_map['ip6'] = ip6
ip_map['id_equip'] = id_equip
url = "ipv6/save/"
code, xml = self.submit({'ip_map': ip_map}, 'POST', url)
return self.response(code, xml) |
def digestSession(self, mecha=MechanismSHA1):
"""
C_DigestInit/C_DigestUpdate/C_DigestKey/C_DigestFinal
:param mecha: the digesting mechanism to be used
(use `MechanismSHA1` for `CKM_SHA_1`)
:type mecha: :class:`Mechanism`
:return: A :class:`DigestSession` object
:rtype: DigestSession
"""
return DigestSession(self.lib, self.session, mecha) | C_DigestInit/C_DigestUpdate/C_DigestKey/C_DigestFinal
:param mecha: the digesting mechanism to be used
(use `MechanismSHA1` for `CKM_SHA_1`)
:type mecha: :class:`Mechanism`
:return: A :class:`DigestSession` object
:rtype: DigestSession | Below is the the instruction that describes the task:
### Input:
C_DigestInit/C_DigestUpdate/C_DigestKey/C_DigestFinal
:param mecha: the digesting mechanism to be used
(use `MechanismSHA1` for `CKM_SHA_1`)
:type mecha: :class:`Mechanism`
:return: A :class:`DigestSession` object
:rtype: DigestSession
### Response:
def digestSession(self, mecha=MechanismSHA1):
"""
C_DigestInit/C_DigestUpdate/C_DigestKey/C_DigestFinal
:param mecha: the digesting mechanism to be used
(use `MechanismSHA1` for `CKM_SHA_1`)
:type mecha: :class:`Mechanism`
:return: A :class:`DigestSession` object
:rtype: DigestSession
"""
return DigestSession(self.lib, self.session, mecha) |
def _push_writer(self):
"""push a capturing buffer onto this Context and return
the new writer function."""
buf = util.FastEncodingBuffer()
self._buffer_stack.append(buf)
return buf.write | push a capturing buffer onto this Context and return
the new writer function. | Below is the the instruction that describes the task:
### Input:
push a capturing buffer onto this Context and return
the new writer function.
### Response:
def _push_writer(self):
"""push a capturing buffer onto this Context and return
the new writer function."""
buf = util.FastEncodingBuffer()
self._buffer_stack.append(buf)
return buf.write |
def prune(self):
"""
Remove anything which shouldn't be displayed.
"""
def to_include(obj):
inc = obj.permission in self.display
if self.settings['hide_undoc'].lower() == 'true' and not obj.doc:
inc = False
return inc
if self.obj == 'proc' and self.meta['proc_internals'] == 'false':
self.functions = []
self.subroutines = []
self.types = []
self.interfaces = []
self.absinterfaces = []
self.variables = []
else:
self.functions = [obj for obj in self.functions if to_include(obj)]
self.subroutines = [obj for obj in self.subroutines if to_include(obj)]
self.types = [obj for obj in self.types if to_include(obj)]
self.interfaces = [obj for obj in self.interfaces if to_include(obj)]
self.absinterfaces = [obj for obj in self.absinterfaces if to_include(obj)]
self.variables = [obj for obj in self.variables if to_include(obj)]
if hasattr(self,'modprocedures'):
self.modprocedures = [obj for obj in self.modprocedures if to_include(obj)]
if hasattr(self,'modsubroutines'):
self.modsubroutines = [obj for obj in self.modsubroutines if to_include(obj)]
if hasattr(self,'modfunctions'):
self.modfunctions = [obj for obj in self.modfunctions if to_include(obj)]
# Recurse
for obj in self.absinterfaces:
obj.visible = True
for obj in self.iterator('functions', 'subroutines', 'types', 'interfaces', 'modprocedures', 'modfunctions', 'modsubroutines'):
obj.visible = True
for obj in self.iterator('functions', 'subroutines', 'types', 'modprocedures', 'modfunctions', 'modsubroutines'):
obj.prune() | Remove anything which shouldn't be displayed. | Below is the the instruction that describes the task:
### Input:
Remove anything which shouldn't be displayed.
### Response:
def prune(self):
"""
Remove anything which shouldn't be displayed.
"""
def to_include(obj):
inc = obj.permission in self.display
if self.settings['hide_undoc'].lower() == 'true' and not obj.doc:
inc = False
return inc
if self.obj == 'proc' and self.meta['proc_internals'] == 'false':
self.functions = []
self.subroutines = []
self.types = []
self.interfaces = []
self.absinterfaces = []
self.variables = []
else:
self.functions = [obj for obj in self.functions if to_include(obj)]
self.subroutines = [obj for obj in self.subroutines if to_include(obj)]
self.types = [obj for obj in self.types if to_include(obj)]
self.interfaces = [obj for obj in self.interfaces if to_include(obj)]
self.absinterfaces = [obj for obj in self.absinterfaces if to_include(obj)]
self.variables = [obj for obj in self.variables if to_include(obj)]
if hasattr(self,'modprocedures'):
self.modprocedures = [obj for obj in self.modprocedures if to_include(obj)]
if hasattr(self,'modsubroutines'):
self.modsubroutines = [obj for obj in self.modsubroutines if to_include(obj)]
if hasattr(self,'modfunctions'):
self.modfunctions = [obj for obj in self.modfunctions if to_include(obj)]
# Recurse
for obj in self.absinterfaces:
obj.visible = True
for obj in self.iterator('functions', 'subroutines', 'types', 'interfaces', 'modprocedures', 'modfunctions', 'modsubroutines'):
obj.visible = True
for obj in self.iterator('functions', 'subroutines', 'types', 'modprocedures', 'modfunctions', 'modsubroutines'):
obj.prune() |
def open_file(path, grib_errors='warn', **kwargs):
"""Open a GRIB file as a ``cfgrib.Dataset``."""
if 'mode' in kwargs:
warnings.warn("the `mode` keyword argument is ignored and deprecated", FutureWarning)
kwargs.pop('mode')
stream = messages.FileStream(path, message_class=cfmessage.CfMessage, errors=grib_errors)
return Dataset(*build_dataset_components(stream, **kwargs)) | Open a GRIB file as a ``cfgrib.Dataset``. | Below is the the instruction that describes the task:
### Input:
Open a GRIB file as a ``cfgrib.Dataset``.
### Response:
def open_file(path, grib_errors='warn', **kwargs):
"""Open a GRIB file as a ``cfgrib.Dataset``."""
if 'mode' in kwargs:
warnings.warn("the `mode` keyword argument is ignored and deprecated", FutureWarning)
kwargs.pop('mode')
stream = messages.FileStream(path, message_class=cfmessage.CfMessage, errors=grib_errors)
return Dataset(*build_dataset_components(stream, **kwargs)) |
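A hedged usage sketch for the cfgrib open_file record above; the GRIB file name is a placeholder and the printed attributes follow the Dataset components named in the record:
from cfgrib import dataset

ds = dataset.open_file('era5-sample.grib')   # placeholder file name
print(ds.dimensions)                         # mapping of dimension names to sizes
print(sorted(ds.variables))                  # variable names decoded from the file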
def initialize_concept_scheme(rdf, cs, label, language, set_modified):
"""Initialize a concept scheme: Optionally add a label if the concept
scheme doesn't have a label, and optionally add a dct:modified
timestamp."""
# check whether the concept scheme is unlabeled, and label it if possible
labels = list(rdf.objects(cs, RDFS.label)) + \
list(rdf.objects(cs, SKOS.prefLabel))
if len(labels) == 0:
if not label:
logging.warning(
"Concept scheme has no label(s). "
"Use --label option to set the concept scheme label.")
else:
logging.info(
"Unlabeled concept scheme detected. Setting label to '%s'" %
label)
rdf.add((cs, RDFS.label, Literal(label, language)))
if set_modified:
curdate = datetime.datetime.utcnow().replace(microsecond=0).isoformat() + 'Z'
rdf.remove((cs, DCTERMS.modified, None))
rdf.add((cs, DCTERMS.modified, Literal(curdate, datatype=XSD.dateTime))) | Initialize a concept scheme: Optionally add a label if the concept
scheme doesn't have a label, and optionally add a dct:modified
timestamp. | Below is the the instruction that describes the task:
### Input:
Initialize a concept scheme: Optionally add a label if the concept
scheme doesn't have a label, and optionally add a dct:modified
timestamp.
### Response:
def initialize_concept_scheme(rdf, cs, label, language, set_modified):
"""Initialize a concept scheme: Optionally add a label if the concept
scheme doesn't have a label, and optionally add a dct:modified
timestamp."""
# check whether the concept scheme is unlabeled, and label it if possible
labels = list(rdf.objects(cs, RDFS.label)) + \
list(rdf.objects(cs, SKOS.prefLabel))
if len(labels) == 0:
if not label:
logging.warning(
"Concept scheme has no label(s). "
"Use --label option to set the concept scheme label.")
else:
logging.info(
"Unlabeled concept scheme detected. Setting label to '%s'" %
label)
rdf.add((cs, RDFS.label, Literal(label, language)))
if set_modified:
curdate = datetime.datetime.utcnow().replace(microsecond=0).isoformat() + 'Z'
rdf.remove((cs, DCTERMS.modified, None))
rdf.add((cs, DCTERMS.modified, Literal(curdate, datatype=XSD.dateTime))) |
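A minimal sketch of calling `initialize_concept_scheme` on a freshly built rdflib graph; the concept scheme URI and label are invented for illustration.
from rdflib import Graph, URIRef
from rdflib.namespace import RDF, SKOS

rdf = Graph()
cs = URIRef('http://example.org/scheme')       # hypothetical concept scheme URI
rdf.add((cs, RDF.type, SKOS.ConceptScheme))    # an unlabeled scheme, so a label will be set
initialize_concept_scheme(rdf, cs, label='Example Scheme',
                          language='en', set_modified=True)
print(rdf.serialize(format='turtle'))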
def _cache_morlist_raw(self, instance):
"""
Fill the Mor objects queue that will be asynchronously processed later.
Resolve the vCenter `rootFolder` and initiate hosts and virtual machines
discovery.
"""
i_key = self._instance_key(instance)
self.log.debug("Caching the morlist for vcenter instance {}".format(i_key))
# If the queue is not completely empty, don't do anything
for resource_type in RESOURCE_TYPE_METRICS:
if self.mor_objects_queue.contains(i_key) and self.mor_objects_queue.size(i_key, resource_type):
last = self.cache_config.get_last(CacheConfig.Morlist, i_key)
self.log.debug(
"Skipping morlist collection: the objects queue for the "
"resource type '{}' is still being processed "
"(latest refresh was {}s ago)".format(ensure_unicode(resource_type), time.time() - last)
)
return
tags = ["vcenter_server:{}".format(ensure_unicode(instance.get('name')))]
regexes = {
'host_include': instance.get('host_include_only_regex'),
'vm_include': instance.get('vm_include_only_regex'),
}
include_only_marked = is_affirmative(instance.get('include_only_marked', False))
# Discover hosts and virtual machines
server_instance = self._get_server_instance(instance)
use_guest_hostname = is_affirmative(instance.get("use_guest_hostname", False))
all_objs = self._get_all_objs(
server_instance, regexes, include_only_marked, tags, use_guest_hostname=use_guest_hostname
)
self.mor_objects_queue.fill(i_key, dict(all_objs))
self.cache_config.set_last(CacheConfig.Morlist, i_key, time.time()) | Fill the Mor objects queue that will be asynchronously processed later.
Resolve the vCenter `rootFolder` and initiate hosts and virtual machines
discovery. | Below is the the instruction that describes the task:
### Input:
Fill the Mor objects queue that will be asynchronously processed later.
Resolve the vCenter `rootFolder` and initiate hosts and virtual machines
discovery.
### Response:
def _cache_morlist_raw(self, instance):
"""
Fill the Mor objects queue that will be asynchronously processed later.
Resolve the vCenter `rootFolder` and initiate hosts and virtual machines
discovery.
"""
i_key = self._instance_key(instance)
self.log.debug("Caching the morlist for vcenter instance {}".format(i_key))
# If the queue is not completely empty, don't do anything
for resource_type in RESOURCE_TYPE_METRICS:
if self.mor_objects_queue.contains(i_key) and self.mor_objects_queue.size(i_key, resource_type):
last = self.cache_config.get_last(CacheConfig.Morlist, i_key)
self.log.debug(
"Skipping morlist collection: the objects queue for the "
"resource type '{}' is still being processed "
"(latest refresh was {}s ago)".format(ensure_unicode(resource_type), time.time() - last)
)
return
tags = ["vcenter_server:{}".format(ensure_unicode(instance.get('name')))]
regexes = {
'host_include': instance.get('host_include_only_regex'),
'vm_include': instance.get('vm_include_only_regex'),
}
include_only_marked = is_affirmative(instance.get('include_only_marked', False))
# Discover hosts and virtual machines
server_instance = self._get_server_instance(instance)
use_guest_hostname = is_affirmative(instance.get("use_guest_hostname", False))
all_objs = self._get_all_objs(
server_instance, regexes, include_only_marked, tags, use_guest_hostname=use_guest_hostname
)
self.mor_objects_queue.fill(i_key, dict(all_objs))
self.cache_config.set_last(CacheConfig.Morlist, i_key, time.time()) |
def int_flags(flags, mapper=const.PERM_STRING_MAP):
"""
Converts string permission flags into integer permission flags as
specified in const.PERM_STRING_MAP
Arguments:
- flags <str>: one or more flags
For example: "crud" or "ru" or "r"
- mapper <list=const.PERM_STRING_MAP>: a list containing tuples mapping
int permission flag to string permission flag. If not specified will
default to const.PERM_STRING_MAP.
Returns:
- int
"""
r = 0
if not flags:
return r
if isinstance(flags, six.integer_types):
return flags
if not isinstance(flags, six.string_types):
raise TypeError("`flags` needs to be a string or integer type")
for f in flags:
for f_i, f_s in mapper:
if f_s == f:
r = r | f_i
return r | Converts string permission flags into integer permission flags as
specified in const.PERM_STRING_MAP
Arguments:
- flags <str>: one or more flags
For example: "crud" or "ru" or "r"
- mapper <list=const.PERM_STRING_MAP>: a list containing tuples mapping
int permission flag to string permission flag. If not specified will
default to const.PERM_STRING_MAP.
Returns:
- int | Below is the the instruction that describes the task:
### Input:
Converts string permission flags into integer permission flags as
specified in const.PERM_STRING_MAP
Arguments:
- flags <str>: one or more flags
For example: "crud" or "ru" or "r"
- mapper <list=const.PERM_STRING_MAP>: a list containing tuples mapping
int permission flag to string permission flag. If not specified will
default to const.PERM_STRING_MAP.
Returns:
- int
### Response:
def int_flags(flags, mapper=const.PERM_STRING_MAP):
"""
Converts string permission flags into integer permission flags as
specified in const.PERM_STRING_MAP
Arguments:
- flags <str>: one or more flags
For example: "crud" or "ru" or "r"
- mapper <list=const.PERM_STRING_MAP>: a list containing tuples mapping
int permission flag to string permission flag. If not specified will
default to const.PERM_STRING_MAP.
Returns:
- int
"""
r = 0
if not flags:
return r
if isinstance(flags, six.integer_types):
return flags
if not isinstance(flags, six.string_types):
raise TypeError("`flags` needs to be a string or integer type")
for f in flags:
for f_i, f_s in mapper:
if f_s == f:
r = r | f_i
return r |
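To make the bit arithmetic concrete, a short illustration; the numeric results are an assumption because they depend on the flag values defined in const.PERM_STRING_MAP.
# Assuming a mapping like [(0x01, 'c'), (0x02, 'r'), (0x04, 'u'), (0x08, 'd')]:
print(int_flags('r'))     # -> 0x02 under the assumed mapping
print(int_flags('ru'))    # -> 0x02 | 0x04 == 0x06 under the assumed mapping
print(int_flags(6))       # integers are passed through unchanged
print(int_flags(''))      # empty or falsy input yields 0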
def generate_primes():
"""
Generate an infinite sequence of prime numbers.
The algorithm was originally written by David Eppstein, UC Irvine. See:
http://code.activestate.com/recipes/117119/
Examples
--------
>>> g = generate_primes()
>>> next(g)
2
>>> next(g)
3
>>> next(g)
5
"""
divisors = {} # map number to at least one divisor
candidate = 2 # next potential prime
while True:
if candidate in divisors:
# candidate is composite. divisors[candidate] is the list of primes
# that divide it. Since we've reached candidate, we no longer need
# it in the map, but we'll mark the next multiples of its witnesses
# to prepare for larger numbers
for p in divisors[candidate]:
divisors.setdefault(p + candidate, []).append(p)
del divisors[candidate]
else:
# candidate is a new prime
yield candidate
# mark its first multiple that isn't
# already marked in previous iterations
divisors[candidate * candidate] = [candidate]
candidate += 1 | Generate an infinite sequence of prime numbers.
The algorithm was originally written by David Eppstein, UC Irvine. See:
http://code.activestate.com/recipes/117119/
Examples
--------
>>> g = generate_primes()
>>> next(g)
2
>>> next(g)
3
>>> next(g)
5 | Below is the the instruction that describes the task:
### Input:
Generate an infinite sequence of prime numbers.
The algorithm was originally written by David Eppstein, UC Irvine. See:
http://code.activestate.com/recipes/117119/
Examples
--------
>>> g = generate_primes()
>>> next(g)
2
>>> next(g)
3
>>> next(g)
5
### Response:
def generate_primes():
"""
Generate an infinite sequence of prime numbers.
The algorithm was originally written by David Eppstein, UC Irvine. See:
http://code.activestate.com/recipes/117119/
Examples
--------
>>> g = generate_primes()
>>> next(g)
2
>>> next(g)
3
>>> next(g)
5
"""
divisors = {} # map number to at least one divisor
candidate = 2 # next potential prime
while True:
if candidate in divisors:
# candidate is composite. divisors[candidate] is the list of primes
# that divide it. Since we've reached candidate, we no longer need
# it in the map, but we'll mark the next multiples of its witnesses
# to prepare for larger numbers
for p in divisors[candidate]:
divisors.setdefault(p + candidate, []).append(p)
del divisors[candidate]
else:
# candidate is a new prime
yield candidate
# mark its first multiple that isn't
# already marked in previous iterations
divisors[candidate * candidate] = [candidate]
candidate += 1 |
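Because the generator never terminates on its own, it is usually consumed with itertools; a quick check of the first few primes:
import itertools

first_ten = list(itertools.islice(generate_primes(), 10))
print(first_ten)   # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]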
def remove_column(self, column_name, inplace=False):
"""
Returns an SFrame with a column removed.
If inplace == False (default) this operation does not modify the
current SFrame, returning a new SFrame.
If inplace == True, this operation modifies the current
SFrame, returning self.
Parameters
----------
column_name : string
The name of the column to remove.
inplace : bool, optional. Defaults to False.
Whether the SFrame is modified in place.
Returns
-------
out : SFrame
The SFrame with given column removed.
Examples
--------
>>> sf = turicreate.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']})
>>> # This is equivalent to `del sf['val']`
>>> res = sf.remove_column('val')
>>> res
+----+
| id |
+----+
| 1 |
| 2 |
| 3 |
+----+
[3 rows x 1 columns]
"""
column_name = str(column_name)
if column_name not in self.column_names():
raise KeyError('Cannot find column %s' % column_name)
colid = self.column_names().index(column_name)
if inplace:
ret = self
else:
ret = self.copy()
with cython_context():
ret.__proxy__.remove_column(colid)
ret._cache = None
return ret | Returns an SFrame with a column removed.
If inplace == False (default) this operation does not modify the
current SFrame, returning a new SFrame.
If inplace == True, this operation modifies the current
SFrame, returning self.
Parameters
----------
column_name : string
The name of the column to remove.
inplace : bool, optional. Defaults to False.
Whether the SFrame is modified in place.
Returns
-------
out : SFrame
The SFrame with given column removed.
Examples
--------
>>> sf = turicreate.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']})
>>> # This is equivalent to `del sf['val']`
>>> res = sf.remove_column('val')
>>> res
+----+
| id |
+----+
| 1 |
| 2 |
| 3 |
+----+
[3 rows x 1 columns] | Below is the the instruction that describes the task:
### Input:
Returns an SFrame with a column removed.
If inplace == False (default) this operation does not modify the
current SFrame, returning a new SFrame.
If inplace == True, this operation modifies the current
SFrame, returning self.
Parameters
----------
column_name : string
The name of the column to remove.
inplace : bool, optional. Defaults to False.
Whether the SFrame is modified in place.
Returns
-------
out : SFrame
The SFrame with given column removed.
Examples
--------
>>> sf = turicreate.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']})
>>> # This is equivalent to `del sf['val']`
>>> res = sf.remove_column('val')
>>> res
+----+
| id |
+----+
| 1 |
| 2 |
| 3 |
+----+
[3 rows x 1 columns]
### Response:
def remove_column(self, column_name, inplace=False):
"""
Returns an SFrame with a column removed.
If inplace == False (default) this operation does not modify the
current SFrame, returning a new SFrame.
If inplace == True, this operation modifies the current
SFrame, returning self.
Parameters
----------
column_name : string
The name of the column to remove.
inplace : bool, optional. Defaults to False.
Whether the SFrame is modified in place.
Returns
-------
out : SFrame
The SFrame with given column removed.
Examples
--------
>>> sf = turicreate.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']})
>>> # This is equivalent to `del sf['val']`
>>> res = sf.remove_column('val')
>>> res
+----+
| id |
+----+
| 1 |
| 2 |
| 3 |
+----+
[3 rows x 1 columns]
"""
column_name = str(column_name)
if column_name not in self.column_names():
raise KeyError('Cannot find column %s' % column_name)
colid = self.column_names().index(column_name)
if inplace:
ret = self
else:
ret = self.copy()
with cython_context():
ret.__proxy__.remove_column(colid)
ret._cache = None
return ret |
def generate_tile_coordinates(roi, num_tiles):
# type: (GeoVector, Tuple[int, int]) -> Iterator[GeoVector]
"""Yields N x M rectangular tiles for a region of interest.
Parameters
----------
roi : GeoVector
Region of interest
num_tiles : tuple
Tuple (horizontal_tiles, vertical_tiles)
Yields
------
~telluric.vectors.GeoVector
"""
bounds = roi.get_shape(roi.crs).bounds
x_range = np.linspace(bounds[0], bounds[2], int(num_tiles[0]) + 1)
y_range = np.linspace(bounds[1], bounds[3], int(num_tiles[1]) + 1)
for y_start, y_end in zip(y_range[:-1], y_range[1:]):
for x_start, x_end in zip(x_range[:-1], x_range[1:]):
new_roi = GeoVector(
Polygon.from_bounds(x_start, y_start, x_end, y_end),
roi.crs
)
yield new_roi | Yields N x M rectangular tiles for a region of interest.
Parameters
----------
roi : GeoVector
Region of interest
num_tiles : tuple
Tuple (horizontal_tiles, vertical_tiles)
Yields
------
~telluric.vectors.GeoVector | Below is the the instruction that describes the task:
### Input:
Yields N x M rectangular tiles for a region of interest.
Parameters
----------
roi : GeoVector
Region of interest
num_tiles : tuple
Tuple (horizontal_tiles, vertical_tiles)
Yields
------
~telluric.vectors.GeoVector
### Response:
def generate_tile_coordinates(roi, num_tiles):
# type: (GeoVector, Tuple[int, int]) -> Iterator[GeoVector]
"""Yields N x M rectangular tiles for a region of interest.
Parameters
----------
roi : GeoVector
Region of interest
num_tiles : tuple
Tuple (horizontal_tiles, vertical_tiles)
Yields
------
~telluric.vectors.GeoVector
"""
bounds = roi.get_shape(roi.crs).bounds
x_range = np.linspace(bounds[0], bounds[2], int(num_tiles[0]) + 1)
y_range = np.linspace(bounds[1], bounds[3], int(num_tiles[1]) + 1)
for y_start, y_end in zip(y_range[:-1], y_range[1:]):
for x_start, x_end in zip(x_range[:-1], x_range[1:]):
new_roi = GeoVector(
Polygon.from_bounds(x_start, y_start, x_end, y_end),
roi.crs
)
yield new_roi |
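A hedged sketch of tiling a region of interest; the WGS84 bounds are invented and the CRS constant is assumed to be importable from telluric.constants.
from shapely.geometry import Polygon
from telluric.constants import WGS84_CRS    # assumed import; any CRS accepted by GeoVector works

roi = GeoVector(Polygon.from_bounds(34.0, 30.0, 35.0, 31.0), WGS84_CRS)
for tile in generate_tile_coordinates(roi, (4, 4)):    # 4 x 4 = 16 rectangular tiles
    print(tile.get_shape(tile.crs).bounds)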
def findCaller(self, stack_info=False):
"""
Find the stack frame of the caller so that we can note the source
file name, line number and function name.
"""
f = logging.currentframe()
# On some versions of IronPython, currentframe() returns None if
# IronPython isn't run with -X:Frames.
if f is not None:
f = f.f_back
rv = "(unknown file)", 0, "(unknown function)"
while hasattr(f, "f_code"):
co = f.f_code
filename = os.path.normcase(co.co_filename)
if filename == logging._srcfile or filename == self._srcfile:
f = f.f_back
continue
rv = (co.co_filename, f.f_lineno, co.co_name)
if stack_info:
sio = io.StringIO()
sio.write('Stack (most recent call last):\n')
traceback.print_stack(f, file=sio)
sinfo = sio.getvalue()
if sinfo[-1] == '\n':
sinfo = sinfo[:-1]
sio.close()
break
return rv | Find the stack frame of the caller so that we can note the source
file name, line number and function name. | Below is the the instruction that describes the task:
### Input:
Find the stack frame of the caller so that we can note the source
file name, line number and function name.
### Response:
def findCaller(self, stack_info=False):
"""
Find the stack frame of the caller so that we can note the source
file name, line number and function name.
"""
f = logging.currentframe()
# On some versions of IronPython, currentframe() returns None if
# IronPython isn't run with -X:Frames.
if f is not None:
f = f.f_back
rv = "(unknown file)", 0, "(unknown function)"
while hasattr(f, "f_code"):
co = f.f_code
filename = os.path.normcase(co.co_filename)
if filename == logging._srcfile or filename == self._srcfile:
f = f.f_back
continue
rv = (co.co_filename, f.f_lineno, co.co_name)
if stack_info:
sio = io.StringIO()
sio.write('Stack (most recent call last):\n')
traceback.print_stack(f, file=sio)
sinfo = sio.getvalue()
if sinfo[-1] == '\n':
sinfo = sinfo[:-1]
sio.close()
break
return rv |
def release(self, signal=True):
"""
Unlocks the account.
Method has no effect if the constructor argument `needs_lock`
        was set to False.
:type signal: bool
:param signal: Whether to emit the released_event signal.
"""
if not self.needs_lock:
return
with self.synclock:
self.lock.release()
if signal:
self.released_event(self)
self.synclock.notify_all() | Unlocks the account.
Method has no effect if the constructor argument `needs_lock`
    was set to False.
:type signal: bool
:param signal: Whether to emit the released_event signal. | Below is the the instruction that describes the task:
### Input:
Unlocks the account.
Method has no effect if the constructor argument `needs_lock`
was set to False.
:type signal: bool
:param signal: Whether to emit the released_event signal.
### Response:
def release(self, signal=True):
"""
Unlocks the account.
Method has no effect if the constructor argument `needs_lock`
        was set to False.
:type signal: bool
:param signal: Whether to emit the released_event signal.
"""
if not self.needs_lock:
return
with self.synclock:
self.lock.release()
if signal:
self.released_event(self)
self.synclock.notify_all() |
def dt_str_to_posix(dt_str):
"""format str to posix.
datetime str is of format %Y-%m-%dT%H:%M:%S.%fZ,
e.g. 2013-04-12T00:22:27.978Z. According to ISO 8601, T is a separator
between date and time when they are on the same line.
Z indicates UTC (zero meridian).
A pointer: http://www.cl.cam.ac.uk/~mgk25/iso-time.html
This is used to parse LastModified node from GCS's GET bucket XML response.
Args:
dt_str: A datetime str.
Returns:
A float of secs from unix epoch. By posix definition, epoch is midnight
1970/1/1 UTC.
"""
parsable, _ = dt_str.split('.')
dt = datetime.datetime.strptime(parsable, _DT_FORMAT)
return calendar.timegm(dt.utctimetuple()) | format str to posix.
datetime str is of format %Y-%m-%dT%H:%M:%S.%fZ,
e.g. 2013-04-12T00:22:27.978Z. According to ISO 8601, T is a separator
between date and time when they are on the same line.
Z indicates UTC (zero meridian).
A pointer: http://www.cl.cam.ac.uk/~mgk25/iso-time.html
This is used to parse LastModified node from GCS's GET bucket XML response.
Args:
dt_str: A datetime str.
Returns:
A float of secs from unix epoch. By posix definition, epoch is midnight
1970/1/1 UTC. | Below is the the instruction that describes the task:
### Input:
format str to posix.
datetime str is of format %Y-%m-%dT%H:%M:%S.%fZ,
e.g. 2013-04-12T00:22:27.978Z. According to ISO 8601, T is a separator
between date and time when they are on the same line.
Z indicates UTC (zero meridian).
A pointer: http://www.cl.cam.ac.uk/~mgk25/iso-time.html
This is used to parse LastModified node from GCS's GET bucket XML response.
Args:
dt_str: A datetime str.
Returns:
A float of secs from unix epoch. By posix definition, epoch is midnight
1970/1/1 UTC.
### Response:
def dt_str_to_posix(dt_str):
"""format str to posix.
datetime str is of format %Y-%m-%dT%H:%M:%S.%fZ,
e.g. 2013-04-12T00:22:27.978Z. According to ISO 8601, T is a separator
between date and time when they are on the same line.
Z indicates UTC (zero meridian).
A pointer: http://www.cl.cam.ac.uk/~mgk25/iso-time.html
This is used to parse LastModified node from GCS's GET bucket XML response.
Args:
dt_str: A datetime str.
Returns:
A float of secs from unix epoch. By posix definition, epoch is midnight
1970/1/1 UTC.
"""
parsable, _ = dt_str.split('.')
dt = datetime.datetime.strptime(parsable, _DT_FORMAT)
return calendar.timegm(dt.utctimetuple()) |
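A quick check with the timestamp quoted in the docstring; the expected value was computed by hand, so treat it as indicative.
secs = dt_str_to_posix('2013-04-12T00:22:27.978Z')
print(secs)   # 1365726147 -- seconds since the Unix epoch; the fractional part is discarded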
def _print_foreign_repetition_table(self, idset1, idset2):
"""
:param idset1:
:param idset2:
"""
assert(isinstance(idset1, idset_with_reference))
assert(isinstance(idset2, idset))
reps = idset2.get_repetitions()
if len(reps) < 1:
return
refs = np.array(idset1.reflst)
table = [['{0} {1} values of repetitions in {2}'.format(idset1.name,
idset1.refname,
idset2.name),
'']]
for rep in reps:
if np.any(idset1 == rep):
matches = refs[np.where(idset1 == rep)]
myrep = rep
for m in matches:
table.append([myrep, m])
myrep = ''
print(tabulate(table, headers='firstrow'))
print('\n') | :param idset1:
:param idset2: | Below is the the instruction that describes the task:
### Input:
:param idset1:
:param idset2:
### Response:
def _print_foreign_repetition_table(self, idset1, idset2):
"""
:param idset1:
:param idset2:
"""
assert(isinstance(idset1, idset_with_reference))
assert(isinstance(idset2, idset))
reps = idset2.get_repetitions()
if len(reps) < 1:
return
refs = np.array(idset1.reflst)
table = [['{0} {1} values of repetitions in {2}'.format(idset1.name,
idset1.refname,
idset2.name),
'']]
for rep in reps:
if np.any(idset1 == rep):
matches = refs[np.where(idset1 == rep)]
myrep = rep
for m in matches:
table.append([myrep, m])
myrep = ''
print(tabulate(table, headers='firstrow'))
print('\n') |
def get_query_indexes(self, raw_result=False):
"""
Retrieves query indexes from the remote database.
:param bool raw_result: If set to True then the raw JSON content for
the request is returned. Default is to return a list containing
:class:`~cloudant.index.Index`,
:class:`~cloudant.index.TextIndex`, and
:class:`~cloudant.index.SpecialIndex` wrapped objects.
:returns: The query indexes in the database
"""
url = '/'.join((self.database_url, '_index'))
resp = self.r_session.get(url)
resp.raise_for_status()
if raw_result:
return response_to_json_dict(resp)
indexes = []
for data in response_to_json_dict(resp).get('indexes', []):
if data.get('type') == JSON_INDEX_TYPE:
indexes.append(Index(
self,
data.get('ddoc'),
data.get('name'),
partitioned=data.get('partitioned', False),
**data.get('def', {})
))
elif data.get('type') == TEXT_INDEX_TYPE:
indexes.append(TextIndex(
self,
data.get('ddoc'),
data.get('name'),
partitioned=data.get('partitioned', False),
**data.get('def', {})
))
elif data.get('type') == SPECIAL_INDEX_TYPE:
indexes.append(SpecialIndex(
self,
data.get('ddoc'),
data.get('name'),
partitioned=data.get('partitioned', False),
**data.get('def', {})
))
else:
raise CloudantDatabaseException(101, data.get('type'))
return indexes | Retrieves query indexes from the remote database.
:param bool raw_result: If set to True then the raw JSON content for
the request is returned. Default is to return a list containing
:class:`~cloudant.index.Index`,
:class:`~cloudant.index.TextIndex`, and
:class:`~cloudant.index.SpecialIndex` wrapped objects.
:returns: The query indexes in the database | Below is the the instruction that describes the task:
### Input:
Retrieves query indexes from the remote database.
:param bool raw_result: If set to True then the raw JSON content for
the request is returned. Default is to return a list containing
:class:`~cloudant.index.Index`,
:class:`~cloudant.index.TextIndex`, and
:class:`~cloudant.index.SpecialIndex` wrapped objects.
:returns: The query indexes in the database
### Response:
def get_query_indexes(self, raw_result=False):
"""
Retrieves query indexes from the remote database.
:param bool raw_result: If set to True then the raw JSON content for
the request is returned. Default is to return a list containing
:class:`~cloudant.index.Index`,
:class:`~cloudant.index.TextIndex`, and
:class:`~cloudant.index.SpecialIndex` wrapped objects.
:returns: The query indexes in the database
"""
url = '/'.join((self.database_url, '_index'))
resp = self.r_session.get(url)
resp.raise_for_status()
if raw_result:
return response_to_json_dict(resp)
indexes = []
for data in response_to_json_dict(resp).get('indexes', []):
if data.get('type') == JSON_INDEX_TYPE:
indexes.append(Index(
self,
data.get('ddoc'),
data.get('name'),
partitioned=data.get('partitioned', False),
**data.get('def', {})
))
elif data.get('type') == TEXT_INDEX_TYPE:
indexes.append(TextIndex(
self,
data.get('ddoc'),
data.get('name'),
partitioned=data.get('partitioned', False),
**data.get('def', {})
))
elif data.get('type') == SPECIAL_INDEX_TYPE:
indexes.append(SpecialIndex(
self,
data.get('ddoc'),
data.get('name'),
partitioned=data.get('partitioned', False),
**data.get('def', {})
))
else:
raise CloudantDatabaseException(101, data.get('type'))
return indexes |
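A hedged sketch of inspecting indexes on an already-opened database; `db` stands in for a connected CloudantDatabase instance and is not constructed here.
for index in db.get_query_indexes():
    print(type(index).__name__)                  # Index, TextIndex or SpecialIndex

raw = db.get_query_indexes(raw_result=True)      # raw JSON instead of wrapper objects
print(len(raw.get('indexes', [])))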
def delete_document(self, doc_id, conn=None):
"""
Delete a document from index
Returns 1 if the document was deleted, 0 if not
"""
if conn is None:
conn = self.redis
return conn.execute_command(self.DEL_CMD, self.index_name, doc_id) | Delete a document from index
Returns 1 if the document was deleted, 0 if not | Below is the the instruction that describes the task:
### Input:
Delete a document from index
Returns 1 if the document was deleted, 0 if not
### Response:
def delete_document(self, doc_id, conn=None):
"""
Delete a document from index
Returns 1 if the document was deleted, 0 if not
"""
if conn is None:
conn = self.redis
return conn.execute_command(self.DEL_CMD, self.index_name, doc_id) |
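A small usage sketch; `client` stands in for an instance of the surrounding search-client class bound to an index, and the document id is made up.
deleted = client.delete_document('doc:42')   # uses the client's own Redis connection
print('removed' if deleted == 1 else 'nothing to remove')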
def is_solid(regex):
"""
Check the given regular expression is solid.
>>> is_solid(r'a')
True
>>> is_solid(r'[ab]')
True
>>> is_solid(r'(a|b|c)')
True
>>> is_solid(r'(a|b|c)?')
True
>>> is_solid(r'(a|b)(c)')
False
>>> is_solid(r'(a|b)(c)?')
False
"""
shape = re.sub(r'(\\.|[^\[\]\(\)\|\?\+\*])', '#', regex)
skeleton = shape.replace('#', '')
if len(shape) <= 1:
return True
if re.match(r'^\[[^\]]*\][\*\+\?]?$', shape):
return True
if re.match(r'^\([^\(]*\)[\*\+\?]?$', shape):
return True
if re.match(r'^\(\)#*?\)\)', skeleton):
return True
else:
return False | Check the given regular expression is solid.
>>> is_solid(r'a')
True
>>> is_solid(r'[ab]')
True
>>> is_solid(r'(a|b|c)')
True
>>> is_solid(r'(a|b|c)?')
True
>>> is_solid(r'(a|b)(c)')
False
>>> is_solid(r'(a|b)(c)?')
False | Below is the the instruction that describes the task:
### Input:
Check the given regular expression is solid.
>>> is_solid(r'a')
True
>>> is_solid(r'[ab]')
True
>>> is_solid(r'(a|b|c)')
True
>>> is_solid(r'(a|b|c)?')
True
>>> is_solid(r'(a|b)(c)')
False
>>> is_solid(r'(a|b)(c)?')
False
### Response:
def is_solid(regex):
"""
Check the given regular expression is solid.
>>> is_solid(r'a')
True
>>> is_solid(r'[ab]')
True
>>> is_solid(r'(a|b|c)')
True
>>> is_solid(r'(a|b|c)?')
True
>>> is_solid(r'(a|b)(c)')
False
>>> is_solid(r'(a|b)(c)?')
False
"""
shape = re.sub(r'(\\.|[^\[\]\(\)\|\?\+\*])', '#', regex)
skeleton = shape.replace('#', '')
if len(shape) <= 1:
return True
if re.match(r'^\[[^\]]*\][\*\+\?]?$', shape):
return True
if re.match(r'^\([^\(]*\)[\*\+\?]?$', shape):
return True
if re.match(r'^\(\)#*?\)\)', skeleton):
return True
else:
return False |
def from_jd(jd):
'''Calculate Indian Civil date from Julian day
Offset in years from Saka era to Gregorian epoch'''
start = 80
# Day offset between Saka and Gregorian
jd = trunc(jd) + 0.5
greg = gregorian.from_jd(jd) # Gregorian date for Julian day
leap = isleap(greg[0]) # Is this a leap year?
# Tentative year in Saka era
year = greg[0] - SAKA_EPOCH
# JD at start of Gregorian year
greg0 = gregorian.to_jd(greg[0], 1, 1)
yday = jd - greg0 # Day number (0 based) in Gregorian year
if leap:
Caitra = 31 # Days in Caitra this year
else:
Caitra = 30
if yday < start:
# Day is at the end of the preceding Saka year
year -= 1
yday += Caitra + (31 * 5) + (30 * 3) + 10 + start
yday -= start
if yday < Caitra:
month = 1
day = yday + 1
else:
mday = yday - Caitra
if (mday < (31 * 5)):
month = trunc(mday / 31) + 2
day = (mday % 31) + 1
else:
mday -= 31 * 5
month = trunc(mday / 30) + 7
day = (mday % 30) + 1
return (year, month, int(day)) | Calculate Indian Civil date from Julian day
Offset in years from Saka era to Gregorian epoch | Below is the the instruction that describes the task:
### Input:
Calculate Indian Civil date from Julian day
Offset in years from Saka era to Gregorian epoch
### Response:
def from_jd(jd):
'''Calculate Indian Civil date from Julian day
Offset in years from Saka era to Gregorian epoch'''
start = 80
# Day offset between Saka and Gregorian
jd = trunc(jd) + 0.5
greg = gregorian.from_jd(jd) # Gregorian date for Julian day
leap = isleap(greg[0]) # Is this a leap year?
# Tentative year in Saka era
year = greg[0] - SAKA_EPOCH
# JD at start of Gregorian year
greg0 = gregorian.to_jd(greg[0], 1, 1)
yday = jd - greg0 # Day number (0 based) in Gregorian year
if leap:
Caitra = 31 # Days in Caitra this year
else:
Caitra = 30
if yday < start:
# Day is at the end of the preceding Saka year
year -= 1
yday += Caitra + (31 * 5) + (30 * 3) + 10 + start
yday -= start
if yday < Caitra:
month = 1
day = yday + 1
else:
mday = yday - Caitra
if (mday < (31 * 5)):
month = trunc(mday / 31) + 2
day = (mday % 31) + 1
else:
mday -= 31 * 5
month = trunc(mday / 30) + 7
day = (mday % 30) + 1
return (year, month, int(day)) |
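A spot check that reuses the module's own `gregorian` helper; the expected Saka date assumes SAKA_EPOCH is 78, as in convertdate-style implementations.
jd = gregorian.to_jd(2019, 3, 22)   # 22 March 2019, a non-leap Gregorian year
print(from_jd(jd))                  # expected (1941, 1, 1): Caitra 1 of Saka year 1941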
def reload_(name):
'''
Reload the named service
CLI Example:
.. code-block:: bash
salt '*' service.reload <service name>
'''
if _service_is_upstart(name):
cmd = 'reload {0}'.format(name)
else:
cmd = '/sbin/service {0} reload'.format(name)
return not __salt__['cmd.retcode'](cmd, python_shell=False) | Reload the named service
CLI Example:
.. code-block:: bash
salt '*' service.reload <service name> | Below is the the instruction that describes the task:
### Input:
Reload the named service
CLI Example:
.. code-block:: bash
salt '*' service.reload <service name>
### Response:
def reload_(name):
'''
Reload the named service
CLI Example:
.. code-block:: bash
salt '*' service.reload <service name>
'''
if _service_is_upstart(name):
cmd = 'reload {0}'.format(name)
else:
cmd = '/sbin/service {0} reload'.format(name)
return not __salt__['cmd.retcode'](cmd, python_shell=False) |
def htmlize_paragraphs(text):
"""
Convert paragraphs delimited by blank lines into HTML text enclosed
in <p> tags.
"""
paragraphs = re.split('(\r?\n)\s*(\r?\n)', text)
return '\n'.join('<p>%s</p>' % paragraph for paragraph in paragraphs) | Convert paragraphs delimited by blank lines into HTML text enclosed
in <p> tags. | Below is the the instruction that describes the task:
### Input:
Convert paragraphs delimited by blank lines into HTML text enclosed
in <p> tags.
### Response:
def htmlize_paragraphs(text):
"""
Convert paragraphs delimited by blank lines into HTML text enclosed
in <p> tags.
"""
paragraphs = re.split('(\r?\n)\s*(\r?\n)', text)
return '\n'.join('<p>%s</p>' % paragraph for paragraph in paragraphs) |
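A quick illustration with an invented two-paragraph string; because the split pattern contains capturing groups, re.split also returns the captured newline separators, so they are wrapped in <p> tags as well.
html = htmlize_paragraphs('First paragraph.\n\nSecond paragraph.')
print(html)   # <p>First paragraph.</p> and <p>Second paragraph.</p>, plus
              # extra <p> elements around the captured newline groups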
def tag(self, text):
"""Retrieves list of keywords in text.
Parameters
----------
text: Text
The text to search for events.
Returns
-------
        list of events sorted by start, end
"""
if self.search_method == 'ahocorasick':
events = self._find_keywords_ahocorasick(text.text)
elif self.search_method == 'naive':
events = self._find_keywords_naive(text.text)
events = self._resolve_conflicts(events)
if self.mapping:
for item in events:
item['type'] = self.map[
text.text[item['start']:item['end']]
]
if self.return_layer:
return events
else:
text[self.layer_name] = events | Retrieves list of keywords in text.
Parameters
----------
text: Text
The text to search for events.
Returns
-------
        list of events sorted by start, end | Below is the the instruction that describes the task:
### Input:
Retrieves list of keywords in text.
Parameters
----------
text: Text
The text to search for events.
Returns
-------
        list of events sorted by start, end
### Response:
def tag(self, text):
"""Retrieves list of keywords in text.
Parameters
----------
text: Text
The text to search for events.
Returns
-------
        list of events sorted by start, end
"""
if self.search_method == 'ahocorasick':
events = self._find_keywords_ahocorasick(text.text)
elif self.search_method == 'naive':
events = self._find_keywords_naive(text.text)
events = self._resolve_conflicts(events)
if self.mapping:
for item in events:
item['type'] = self.map[
text.text[item['start']:item['end']]
]
if self.return_layer:
return events
else:
text[self.layer_name] = events |
def _presence_listener(self, event: Dict[str, Any]):
"""
Update cached user presence state from Matrix presence events.
Due to the possibility of nodes using accounts on multiple homeservers a composite
address state is synthesised from the cached individual user presence states.
"""
if self._stop_event.ready():
return
user_id = event['sender']
if event['type'] != 'm.presence' or user_id == self._user_id:
return
user = self._get_user(user_id)
user.displayname = event['content'].get('displayname') or user.displayname
address = self._validate_userid_signature(user)
if not address:
# Malformed address - skip
return
# not a user we've whitelisted, skip
if not self.is_address_known(address):
return
self.add_userid_for_address(address, user_id)
new_state = UserPresence(event['content']['presence'])
if new_state == self._userid_to_presence.get(user_id):
# Cached presence state matches, no action required
return
self._userid_to_presence[user_id] = new_state
self.refresh_address_presence(address)
if self._user_presence_changed_callback:
self._user_presence_changed_callback(user, new_state) | Update cached user presence state from Matrix presence events.
Due to the possibility of nodes using accounts on multiple homeservers a composite
address state is synthesised from the cached individual user presence states. | Below is the the instruction that describes the task:
### Input:
Update cached user presence state from Matrix presence events.
Due to the possibility of nodes using accounts on multiple homeservers a composite
address state is synthesised from the cached individual user presence states.
### Response:
def _presence_listener(self, event: Dict[str, Any]):
"""
Update cached user presence state from Matrix presence events.
Due to the possibility of nodes using accounts on multiple homeservers a composite
address state is synthesised from the cached individual user presence states.
"""
if self._stop_event.ready():
return
user_id = event['sender']
if event['type'] != 'm.presence' or user_id == self._user_id:
return
user = self._get_user(user_id)
user.displayname = event['content'].get('displayname') or user.displayname
address = self._validate_userid_signature(user)
if not address:
# Malformed address - skip
return
# not a user we've whitelisted, skip
if not self.is_address_known(address):
return
self.add_userid_for_address(address, user_id)
new_state = UserPresence(event['content']['presence'])
if new_state == self._userid_to_presence.get(user_id):
# Cached presence state matches, no action required
return
self._userid_to_presence[user_id] = new_state
self.refresh_address_presence(address)
if self._user_presence_changed_callback:
self._user_presence_changed_callback(user, new_state) |
def process_agreement_events_publisher(publisher_account, agreement_id, did, service_agreement,
price, consumer_address, condition_ids):
"""
    Process the agreement events during the registration of the service agreement for the publisher side
:param publisher_account: Account instance of the publisher
:param agreement_id: id of the agreement, hex str
:param did: DID, str
:param service_agreement: ServiceAgreement instance
:param price: Asset price, int
:param consumer_address: ethereum account address of consumer, hex str
:param condition_ids: is a list of bytes32 content-addressed Condition IDs, bytes32
:return:
"""
conditions_dict = service_agreement.condition_by_name
events_manager = EventsManager.get_instance(Keeper.get_instance())
events_manager.watch_lock_reward_event(
agreement_id,
access_secret_store_condition.fulfillAccessSecretStoreCondition,
None,
(agreement_id, did, service_agreement,
consumer_address, publisher_account),
conditions_dict['lockReward'].timeout
)
events_manager.watch_access_event(
agreement_id,
escrow_reward_condition.fulfillEscrowRewardCondition,
None,
(agreement_id, service_agreement,
price, consumer_address, publisher_account, condition_ids),
conditions_dict['accessSecretStore'].timeout
)
events_manager.watch_reward_event(
agreement_id,
verify_reward_condition.verifyRewardTokens,
None,
(agreement_id, did, service_agreement,
price, consumer_address, publisher_account),
conditions_dict['escrowReward'].timeout
    ) | Process the agreement events during the registration of the service agreement for the publisher side
:param publisher_account: Account instance of the publisher
:param agreement_id: id of the agreement, hex str
:param did: DID, str
:param service_agreement: ServiceAgreement instance
:param price: Asset price, int
:param consumer_address: ethereum account address of consumer, hex str
:param condition_ids: is a list of bytes32 content-addressed Condition IDs, bytes32
:return: | Below is the the instruction that describes the task:
### Input:
Process the agreement events during the registration of the service agreement for the publisher side
:param publisher_account: Account instance of the publisher
:param agreement_id: id of the agreement, hex str
:param did: DID, str
:param service_agreement: ServiceAgreement instance
:param price: Asset price, int
:param consumer_address: ethereum account address of consumer, hex str
:param condition_ids: is a list of bytes32 content-addressed Condition IDs, bytes32
:return:
### Response:
def process_agreement_events_publisher(publisher_account, agreement_id, did, service_agreement,
price, consumer_address, condition_ids):
"""
    Process the agreement events during the registration of the service agreement for the publisher side
:param publisher_account: Account instance of the publisher
:param agreement_id: id of the agreement, hex str
:param did: DID, str
:param service_agreement: ServiceAgreement instance
:param price: Asset price, int
:param consumer_address: ethereum account address of consumer, hex str
:param condition_ids: is a list of bytes32 content-addressed Condition IDs, bytes32
:return:
"""
conditions_dict = service_agreement.condition_by_name
events_manager = EventsManager.get_instance(Keeper.get_instance())
events_manager.watch_lock_reward_event(
agreement_id,
access_secret_store_condition.fulfillAccessSecretStoreCondition,
None,
(agreement_id, did, service_agreement,
consumer_address, publisher_account),
conditions_dict['lockReward'].timeout
)
events_manager.watch_access_event(
agreement_id,
escrow_reward_condition.fulfillEscrowRewardCondition,
None,
(agreement_id, service_agreement,
price, consumer_address, publisher_account, condition_ids),
conditions_dict['accessSecretStore'].timeout
)
events_manager.watch_reward_event(
agreement_id,
verify_reward_condition.verifyRewardTokens,
None,
(agreement_id, did, service_agreement,
price, consumer_address, publisher_account),
conditions_dict['escrowReward'].timeout
) |
def top(self, topn=10):
"""
Get a list of the top ``topn`` features in this :class:`.Feature`\.
Examples
--------
.. code-block:: python
>>> myFeature = Feature([('the', 2), ('pine', 1), ('trapezoid', 5)])
>>> myFeature.top(1)
[('trapezoid', 5)]
Parameters
----------
topn : int
Returns
-------
list
"""
return [self[i] for i in argsort(list(zip(*self))[1])[::-1][:topn]] | Get a list of the top ``topn`` features in this :class:`.Feature`\.
Examples
--------
.. code-block:: python
>>> myFeature = Feature([('the', 2), ('pine', 1), ('trapezoid', 5)])
>>> myFeature.top(1)
[('trapezoid', 5)]
Parameters
----------
topn : int
Returns
-------
list | Below is the the instruction that describes the task:
### Input:
Get a list of the top ``topn`` features in this :class:`.Feature`\.
Examples
--------
.. code-block:: python
>>> myFeature = Feature([('the', 2), ('pine', 1), ('trapezoid', 5)])
>>> myFeature.top(1)
[('trapezoid', 5)]
Parameters
----------
topn : int
Returns
-------
list
### Response:
def top(self, topn=10):
"""
Get a list of the top ``topn`` features in this :class:`.Feature`\.
Examples
--------
.. code-block:: python
>>> myFeature = Feature([('the', 2), ('pine', 1), ('trapezoid', 5)])
>>> myFeature.top(1)
[('trapezoid', 5)]
Parameters
----------
topn : int
Returns
-------
list
"""
return [self[i] for i in argsort(list(zip(*self))[1])[::-1][:topn]] |
def success(self):
"""return True if a response status is Success or Follows:
.. code-block:: python
>>> resp = Message({'Response': 'Success'})
>>> print(resp.success)
True
>>> resp['Response'] = 'Failed'
>>> resp.success
False
"""
if 'event' in self:
return True
if self.response in self.success_responses:
return True
return False | return True if a response status is Success or Follows:
.. code-block:: python
>>> resp = Message({'Response': 'Success'})
>>> print(resp.success)
True
>>> resp['Response'] = 'Failed'
>>> resp.success
False | Below is the the instruction that describes the task:
### Input:
return True if a response status is Success or Follows:
.. code-block:: python
>>> resp = Message({'Response': 'Success'})
>>> print(resp.success)
True
>>> resp['Response'] = 'Failed'
>>> resp.success
False
### Response:
def success(self):
"""return True if a response status is Success or Follows:
.. code-block:: python
>>> resp = Message({'Response': 'Success'})
>>> print(resp.success)
True
>>> resp['Response'] = 'Failed'
>>> resp.success
False
"""
if 'event' in self:
return True
if self.response in self.success_responses:
return True
return False |
def make_pymol(pdb_file, cutoff=7.0, min_kihs=2, outfile=None):
""" Pymol script for viewing classic coiled-coil Socket output.
Notes
-----
For examples of these views, browse the CC+ database here: http://coiledcoils.chm.bris.ac.uk/ccplus/search/.
Parameters
----------
pdb_file: str
Path to a pdb_file.
cutoff: float
Socket cutoff in Angstroms.
min_kihs: int
        Minimum number of KnobIntoHole interactions between pairs of helices needed to define a coiled coil.
        outfile: None or str
        Path to an output file to save the pml script.
Returns
-------
script_string: str
Pymol commands for classic coiled-coil view.
"""
a = convert_pdb_to_ampal(pdb=pdb_file, path=True)
kg = KnobGroup.from_helices(a, cutoff=cutoff)
g = kg.filter_graph(kg.graph, cutoff=cutoff, min_kihs=min_kihs)
ccs = sorted_connected_components(g)
# Opens pymol script, initial set up of screen
script_lines = ['load {0}'.format(pdb_file)]
script_lines.append("hide all")
script_lines.append("bg_color white")
script_lines.append("set antialias, 1")
script_lines.append("set cartoon_dumbbell_length, 0.35")
script_lines.append("set_color lightgrey, [240,240,240]")
script_lines.append("set depth_cue, 0")
script_lines.append("color lightgrey, all")
script_lines.append("cartoon dumbbell")
script_lines.append("show cartoon")
for cc_number, cc in enumerate(ccs):
helices = [x for x in g.nodes() if x.number in cc.nodes()]
#helices = cc.nodes()
cc_region = kg.get_coiledcoil_region(cc_number=cc_number, cutoff=cutoff, min_kihs=min_kihs)
tag_residues_with_heptad_register(cc_region)
assigned_regions = kg.get_assigned_regions(include_alt_states=False, complementary_only=False, helices=helices)
helix_starts = [int(h[0].id) for h in helices]
helix_ends = [int(h[-1].id) for h in helices]
chains = [h.ampal_parent.id for h in helices]
assigned_starts = [assigned_regions[h.number][0] for h in helices]
assigned_ends = [assigned_regions[h.number][1] for h in helices]
assigned_selections = ['{0}/{1}-{2}/'.format(chain, assigned_start, assigned_end)
for chain, assigned_start, assigned_end in zip(chains, assigned_starts, assigned_ends)]
script_lines.append("select cc{0}, {1}".format(cc_number, ' '.join(assigned_selections)))
script_lines.append("cartoon automatic, cc{0}".format(cc_number))
for h_number, h in enumerate(helices):
chain = chains[h_number]
helix_start = helix_starts[h_number]
helix_end = helix_ends[h_number]
assigned_start = assigned_starts[h_number]
assigned_end = assigned_ends[h_number]
selection = '{0}/{1}-{2}/'.format(chain, helix_start, helix_end)
script_lines.append("select cc{0}eh{1}, {2}".format(cc_number, h_number, selection))
selection = '{0}/{1}-{2}/'.format(chain, assigned_start, assigned_end)
script_lines.append("select cc{0}ah{1}, {2}".format(cc_number, h_number, selection))
kihs = [x for x in kg if x.knob_helix == h]
for x in kihs:
knob_selection_name = 'cc{0}ah{1}k{2}'.format(cc_number, h_number, x.knob_residue.id)
hole_selection_name = knob_selection_name + 'hole'
knob_selection = '{0}/{1}/'.format(chain, x.knob_residue.id)
script_lines.append('select {0}, {1}'.format(knob_selection_name, knob_selection))
hole_selection = ' '.join(['{0}/{1}/'.format(x.hole_chain, y.id) for y in x.hole_residues])
script_lines.append('select {0}, {1}'.format(hole_selection_name, hole_selection))
script_lines.append('show sticks, {0}'.format(knob_selection_name))
script_lines.append('show sticks, {0}'.format(hole_selection_name))
for r in h.get_monomers():
if 'register' in r.tags:
color = _heptad_colours[r.tags['register']]
script_lines.append('color {0}, {1}/{2}/'.format(color, chain, r.id))
script_lines.append('deselect')
script_lines.append('orient')
script_lines.append('rotate z, 90')
script_lines.append('zoom complete=1')
script_string = '\n'.join(script_lines)
if outfile is not None:
if isinstance(outfile, str) and outfile[-3:] == 'pml':
with open(outfile, 'w') as foo:
foo.write(script_string)
return script_string | Pymol script for viewing classic coiled-coil Socket output.
Notes
-----
For examples of these views, browse the CC+ database here: http://coiledcoils.chm.bris.ac.uk/ccplus/search/.
Parameters
----------
pdb_file: str
Path to a pdb_file.
cutoff: float
Socket cutoff in Angstroms.
min_kihs: int
    Minimum number of KnobIntoHole interactions between pairs of helices needed to define a coiled coil.
    outfile: None or str
    Path to an output file to save the pml script.
Returns
-------
script_string: str
Pymol commands for classic coiled-coil view. | Below is the the instruction that describes the task:
### Input:
Pymol script for viewing classic coiled-coil Socket output.
Notes
-----
For examples of these views, browse the CC+ database here: http://coiledcoils.chm.bris.ac.uk/ccplus/search/.
Parameters
----------
pdb_file: str
Path to a pdb_file.
cutoff: float
Socket cutoff in Angstroms.
min_kihs: int
    Minimum number of KnobIntoHole interactions between pairs of helices needed to define a coiled coil.
    outfile: None or str
    Path to an output file to save the pml script.
Returns
-------
script_string: str
Pymol commands for classic coiled-coil view.
### Response:
def make_pymol(pdb_file, cutoff=7.0, min_kihs=2, outfile=None):
""" Pymol script for viewing classic coiled-coil Socket output.
Notes
-----
For examples of these views, browse the CC+ database here: http://coiledcoils.chm.bris.ac.uk/ccplus/search/.
Parameters
----------
pdb_file: str
Path to a pdb_file.
cutoff: float
Socket cutoff in Angstroms.
min_kihs: int
        Minimum number of KnobIntoHole interactions between pairs of helices needed to define a coiled coil.
        outfile: None or str
        Path to an output file to save the pml script.
Returns
-------
script_string: str
Pymol commands for classic coiled-coil view.
"""
a = convert_pdb_to_ampal(pdb=pdb_file, path=True)
kg = KnobGroup.from_helices(a, cutoff=cutoff)
g = kg.filter_graph(kg.graph, cutoff=cutoff, min_kihs=min_kihs)
ccs = sorted_connected_components(g)
# Opens pymol script, initial set up of screen
script_lines = ['load {0}'.format(pdb_file)]
script_lines.append("hide all")
script_lines.append("bg_color white")
script_lines.append("set antialias, 1")
script_lines.append("set cartoon_dumbbell_length, 0.35")
script_lines.append("set_color lightgrey, [240,240,240]")
script_lines.append("set depth_cue, 0")
script_lines.append("color lightgrey, all")
script_lines.append("cartoon dumbbell")
script_lines.append("show cartoon")
for cc_number, cc in enumerate(ccs):
helices = [x for x in g.nodes() if x.number in cc.nodes()]
#helices = cc.nodes()
cc_region = kg.get_coiledcoil_region(cc_number=cc_number, cutoff=cutoff, min_kihs=min_kihs)
tag_residues_with_heptad_register(cc_region)
assigned_regions = kg.get_assigned_regions(include_alt_states=False, complementary_only=False, helices=helices)
helix_starts = [int(h[0].id) for h in helices]
helix_ends = [int(h[-1].id) for h in helices]
chains = [h.ampal_parent.id for h in helices]
assigned_starts = [assigned_regions[h.number][0] for h in helices]
assigned_ends = [assigned_regions[h.number][1] for h in helices]
assigned_selections = ['{0}/{1}-{2}/'.format(chain, assigned_start, assigned_end)
for chain, assigned_start, assigned_end in zip(chains, assigned_starts, assigned_ends)]
script_lines.append("select cc{0}, {1}".format(cc_number, ' '.join(assigned_selections)))
script_lines.append("cartoon automatic, cc{0}".format(cc_number))
for h_number, h in enumerate(helices):
chain = chains[h_number]
helix_start = helix_starts[h_number]
helix_end = helix_ends[h_number]
assigned_start = assigned_starts[h_number]
assigned_end = assigned_ends[h_number]
selection = '{0}/{1}-{2}/'.format(chain, helix_start, helix_end)
script_lines.append("select cc{0}eh{1}, {2}".format(cc_number, h_number, selection))
selection = '{0}/{1}-{2}/'.format(chain, assigned_start, assigned_end)
script_lines.append("select cc{0}ah{1}, {2}".format(cc_number, h_number, selection))
kihs = [x for x in kg if x.knob_helix == h]
for x in kihs:
knob_selection_name = 'cc{0}ah{1}k{2}'.format(cc_number, h_number, x.knob_residue.id)
hole_selection_name = knob_selection_name + 'hole'
knob_selection = '{0}/{1}/'.format(chain, x.knob_residue.id)
script_lines.append('select {0}, {1}'.format(knob_selection_name, knob_selection))
hole_selection = ' '.join(['{0}/{1}/'.format(x.hole_chain, y.id) for y in x.hole_residues])
script_lines.append('select {0}, {1}'.format(hole_selection_name, hole_selection))
script_lines.append('show sticks, {0}'.format(knob_selection_name))
script_lines.append('show sticks, {0}'.format(hole_selection_name))
for r in h.get_monomers():
if 'register' in r.tags:
color = _heptad_colours[r.tags['register']]
script_lines.append('color {0}, {1}/{2}/'.format(color, chain, r.id))
script_lines.append('deselect')
script_lines.append('orient')
script_lines.append('rotate z, 90')
script_lines.append('zoom complete=1')
script_string = '\n'.join(script_lines)
if outfile is not None:
if isinstance(outfile, str) and outfile[-3:] == 'pml':
with open(outfile, 'w') as foo:
foo.write(script_string)
return script_string |
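A hedged invocation sketch; the PDB file name is a placeholder and the AMPAL/ISAMBARD machinery used above (convert_pdb_to_ampal, KnobGroup) is assumed to be available.
pml = make_pymol('coiled_coil_dimer.pdb', cutoff=7.0, min_kihs=2,
                 outfile='coiled_coil_view.pml')   # also writes the script to disk
print(pml.splitlines()[0])                         # first command, e.g. "load coiled_coil_dimer.pdb"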
def get_container_setting(name, container, settings):
'''
Get the value of the setting for the IIS container.
.. versionadded:: 2016.11.0
Args:
name (str): The name of the IIS container.
container (str): The type of IIS container. The container types are:
AppPools, Sites, SslBindings
settings (dict): A dictionary of the setting names and their values.
Returns:
dict: A dictionary of the provided settings and their values.
CLI Example:
.. code-block:: bash
salt '*' win_iis.get_container_setting name='MyTestPool' container='AppPools'
settings="['processModel.identityType']"
'''
ret = dict()
ps_cmd = list()
ps_cmd_validate = list()
container_path = r"IIS:\{0}\{1}".format(container, name)
if not settings:
log.warning('No settings provided')
return ret
ps_cmd.append(r'$Settings = @{};')
for setting in settings:
# Build the commands to verify that the property names are valid.
ps_cmd_validate.extend(['Get-ItemProperty',
'-Path', "'{0}'".format(container_path),
'-Name', "'{0}'".format(setting),
'-ErrorAction', 'Stop',
'|', 'Out-Null;'])
# Some ItemProperties are Strings and others are ConfigurationAttributes.
# Since the former doesn't have a Value property, we need to account
# for this.
ps_cmd.append("$Property = Get-ItemProperty -Path '{0}'".format(container_path))
ps_cmd.append("-Name '{0}' -ErrorAction Stop;".format(setting))
ps_cmd.append(r'if (([String]::IsNullOrEmpty($Property) -eq $False) -and')
ps_cmd.append(r"($Property.GetType()).Name -eq 'ConfigurationAttribute') {")
ps_cmd.append(r'$Property = $Property | Select-Object')
ps_cmd.append(r'-ExpandProperty Value };')
ps_cmd.append("$Settings['{0}'] = [String] $Property;".format(setting))
ps_cmd.append(r'$Property = $Null;')
# Validate the setting names that were passed in.
cmd_ret = _srvmgr(cmd=ps_cmd_validate, return_json=True)
if cmd_ret['retcode'] != 0:
message = 'One or more invalid property names were specified for the provided container.'
raise SaltInvocationError(message)
ps_cmd.append('$Settings')
cmd_ret = _srvmgr(cmd=ps_cmd, return_json=True)
try:
items = salt.utils.json.loads(cmd_ret['stdout'], strict=False)
if isinstance(items, list):
ret.update(items[0])
else:
ret.update(items)
except ValueError:
raise CommandExecutionError('Unable to parse return data as Json.')
return ret | Get the value of the setting for the IIS container.
.. versionadded:: 2016.11.0
Args:
name (str): The name of the IIS container.
container (str): The type of IIS container. The container types are:
AppPools, Sites, SslBindings
settings (dict): A dictionary of the setting names and their values.
Returns:
dict: A dictionary of the provided settings and their values.
CLI Example:
.. code-block:: bash
salt '*' win_iis.get_container_setting name='MyTestPool' container='AppPools'
settings="['processModel.identityType']" | Below is the the instruction that describes the task:
### Input:
Get the value of the setting for the IIS container.
.. versionadded:: 2016.11.0
Args:
name (str): The name of the IIS container.
container (str): The type of IIS container. The container types are:
AppPools, Sites, SslBindings
settings (dict): A dictionary of the setting names and their values.
Returns:
dict: A dictionary of the provided settings and their values.
CLI Example:
.. code-block:: bash
salt '*' win_iis.get_container_setting name='MyTestPool' container='AppPools'
settings="['processModel.identityType']"
### Response:
def get_container_setting(name, container, settings):
'''
Get the value of the setting for the IIS container.
.. versionadded:: 2016.11.0
Args:
name (str): The name of the IIS container.
container (str): The type of IIS container. The container types are:
AppPools, Sites, SslBindings
settings (dict): A dictionary of the setting names and their values.
Returns:
dict: A dictionary of the provided settings and their values.
CLI Example:
.. code-block:: bash
salt '*' win_iis.get_container_setting name='MyTestPool' container='AppPools'
settings="['processModel.identityType']"
'''
ret = dict()
ps_cmd = list()
ps_cmd_validate = list()
container_path = r"IIS:\{0}\{1}".format(container, name)
if not settings:
log.warning('No settings provided')
return ret
ps_cmd.append(r'$Settings = @{};')
for setting in settings:
# Build the commands to verify that the property names are valid.
ps_cmd_validate.extend(['Get-ItemProperty',
'-Path', "'{0}'".format(container_path),
'-Name', "'{0}'".format(setting),
'-ErrorAction', 'Stop',
'|', 'Out-Null;'])
# Some ItemProperties are Strings and others are ConfigurationAttributes.
# Since the former doesn't have a Value property, we need to account
# for this.
ps_cmd.append("$Property = Get-ItemProperty -Path '{0}'".format(container_path))
ps_cmd.append("-Name '{0}' -ErrorAction Stop;".format(setting))
ps_cmd.append(r'if (([String]::IsNullOrEmpty($Property) -eq $False) -and')
ps_cmd.append(r"($Property.GetType()).Name -eq 'ConfigurationAttribute') {")
ps_cmd.append(r'$Property = $Property | Select-Object')
ps_cmd.append(r'-ExpandProperty Value };')
ps_cmd.append("$Settings['{0}'] = [String] $Property;".format(setting))
ps_cmd.append(r'$Property = $Null;')
# Validate the setting names that were passed in.
cmd_ret = _srvmgr(cmd=ps_cmd_validate, return_json=True)
if cmd_ret['retcode'] != 0:
message = 'One or more invalid property names were specified for the provided container.'
raise SaltInvocationError(message)
ps_cmd.append('$Settings')
cmd_ret = _srvmgr(cmd=ps_cmd, return_json=True)
try:
items = salt.utils.json.loads(cmd_ret['stdout'], strict=False)
if isinstance(items, list):
ret.update(items[0])
else:
ret.update(items)
except ValueError:
raise CommandExecutionError('Unable to parse return data as Json.')
return ret |
def staticmap(ctx, mapid, output, features, lat, lon, zoom, size):
"""
Generate static map images from existing Mapbox map ids.
Optionally overlay with geojson features.
$ mapbox staticmap --features features.geojson mapbox.satellite out.png
$ mapbox staticmap --lon -61.7 --lat 12.1 --zoom 12 mapbox.satellite out2.png
An access token is required, see `mapbox --help`.
"""
access_token = (ctx.obj and ctx.obj.get('access_token')) or None
if features:
features = list(
cligj.normalize_feature_inputs(None, 'features', [features]))
service = mapbox.Static(access_token=access_token)
try:
res = service.image(
mapid,
lon=lon, lat=lat, z=zoom,
width=size[0], height=size[1],
features=features, sort_keys=True)
except mapbox.errors.ValidationError as exc:
raise click.BadParameter(str(exc))
if res.status_code == 200:
output.write(res.content)
else:
raise MapboxCLIException(res.text.strip()) | Generate static map images from existing Mapbox map ids.
Optionally overlay with geojson features.
$ mapbox staticmap --features features.geojson mapbox.satellite out.png
$ mapbox staticmap --lon -61.7 --lat 12.1 --zoom 12 mapbox.satellite out2.png
An access token is required, see `mapbox --help`. | Below is the instruction that describes the task:

### Input:
Generate static map images from existing Mapbox map ids.
Optionally overlay with geojson features.
$ mapbox staticmap --features features.geojson mapbox.satellite out.png
$ mapbox staticmap --lon -61.7 --lat 12.1 --zoom 12 mapbox.satellite out2.png
An access token is required, see `mapbox --help`.
### Response:
def staticmap(ctx, mapid, output, features, lat, lon, zoom, size):
"""
Generate static map images from existing Mapbox map ids.
Optionally overlay with geojson features.
$ mapbox staticmap --features features.geojson mapbox.satellite out.png
$ mapbox staticmap --lon -61.7 --lat 12.1 --zoom 12 mapbox.satellite out2.png
An access token is required, see `mapbox --help`.
"""
access_token = (ctx.obj and ctx.obj.get('access_token')) or None
if features:
features = list(
cligj.normalize_feature_inputs(None, 'features', [features]))
service = mapbox.Static(access_token=access_token)
try:
res = service.image(
mapid,
lon=lon, lat=lat, z=zoom,
width=size[0], height=size[1],
features=features, sort_keys=True)
except mapbox.errors.ValidationError as exc:
raise click.BadParameter(str(exc))
if res.status_code == 200:
output.write(res.content)
else:
raise MapboxCLIException(res.text.strip()) |
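The command above is a thin wrapper around `mapbox.Static`; here is a hedged sketch of calling that service directly. A valid token exported as `MAPBOX_ACCESS_TOKEN` is assumed, and the coordinates mirror the docstring example.

```python
# Hedged sketch of the underlying SDK call the command wraps; assumes a
# valid Mapbox token in the MAPBOX_ACCESS_TOKEN environment variable.
import os
import mapbox

service = mapbox.Static(access_token=os.environ['MAPBOX_ACCESS_TOKEN'])
res = service.image('mapbox.satellite', lon=-61.7, lat=12.1, z=12,
                    width=600, height=600)
if res.status_code == 200:
    with open('out2.png', 'wb') as dst:
        dst.write(res.content)
else:
    raise RuntimeError(res.text.strip())
```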
def _apply_rules(expr, rules):
"""Recursively re-instantiate the expression, while applying all of the
given `rules` to all encountered (sub-) expressions
Args:
expr: Any Expression or scalar object
rules (list, ~collections.OrderedDict): A list of rules, or a dictionary
mapping names to rules, where each rule is a tuple ``(pattern,
replacement)`` where `pattern` is an instance of :class:`.Pattern`)
and `replacement` is a callable. The pattern will be matched
against any expression that is encountered during the
re-instantiation. If the `pattern` matches, then the
(sub-)expression is replaced by the result of calling `replacement`
while passing any wildcards from `pattern` as keyword arguments. If
`replacement` raises :exc:`.CannotSimplify`, it will be ignored
Note:
Instead of or in addition to passing `rules`, `simplify` can often be
combined with e.g. `extra_rules` / `extra_binary_rules` context
managers. If a simplification can be handled through these context
managers, this is usually more efficient than an equivalent rule.
However, both really are complementary: the rules defined in the
context managers are applied *before* instantiation (hence these
patterns are instantiated through `pattern_head`). In contrast, the
patterns defined in `rules` are applied against instantiated
expressions.
"""
if LOG:
logger = logging.getLogger('QNET.create')
stack = []
path = []
if isinstance(expr, Expression):
stack.append(ProtoExpr.from_expr(expr))
path.append(0)
if LOG:
logger.debug(
"Starting at level 1: placing expr on stack: %s", expr)
while True:
i = path[-1]
try:
arg = stack[-1][i]
if LOG:
logger.debug(
"At level %d: considering arg %d: %s",
len(stack), i+1, arg)
except IndexError:
# done at this level
path.pop()
expr = stack.pop().instantiate()
expr = _apply_rules_no_recurse(expr, rules)
if len(stack) == 0:
if LOG:
logger.debug(
"Complete level 1: returning simplified expr: %s",
expr)
return expr
else:
stack[-1][path[-1]] = expr
path[-1] += 1
if LOG:
logger.debug(
"Complete level %d. At level %d, setting arg %d "
"to simplified expr: %s", len(stack)+1, len(stack),
path[-1], expr)
else:
if isinstance(arg, Expression):
stack.append(ProtoExpr.from_expr(arg))
path.append(0)
if LOG:
logger.debug(" placing arg on stack")
else: # scalar
stack[-1][i] = _apply_rules_no_recurse(arg, rules)
if LOG:
logger.debug(
" arg is leaf, replacing with simplified expr: "
"%s", stack[-1][i])
path[-1] += 1
else:
return _apply_rules_no_recurse(expr, rules) | Recursively re-instantiate the expression, while applying all of the
given `rules` to all encountered (sub-) expressions
Args:
expr: Any Expression or scalar object
rules (list, ~collections.OrderedDict): A list of rules, or a dictionary
mapping names to rules, where each rule is a tuple ``(pattern,
replacement)`` where `pattern` is an instance of :class:`.Pattern`)
and `replacement` is a callable. The pattern will be matched
against any expression that is encountered during the
re-instantiation. If the `pattern` matches, then the
(sub-)expression is replaced by the result of calling `replacement`
while passing any wildcards from `pattern` as keyword arguments. If
`replacement` raises :exc:`.CannotSimplify`, it will be ignored
Note:
Instead of or in addition to passing `rules`, `simplify` can often be
combined with e.g. `extra_rules` / `extra_binary_rules` context
managers. If a simplification can be handled through these context
managers, this is usually more efficient than an equivalent rule.
However, both really are complementary: the rules defined in the
context managers are applied *before* instantiation (hence these
patterns are instantiated through `pattern_head`). In contrast, the
patterns defined in `rules` are applied against instantiated
expressions. | Below is the instruction that describes the task:
### Input:
Recursively re-instantiate the expression, while applying all of the
given `rules` to all encountered (sub-) expressions
Args:
expr: Any Expression or scalar object
rules (list, ~collections.OrderedDict): A list of rules dictionary
mapping names to rules, where each rule is a tuple ``(pattern,
replacement)`` where `pattern` is an instance of :class:`.Pattern`)
and `replacement` is a callable. The pattern will be matched
against any expression that is encountered during the
re-instantiation. If the `pattern` matches, then the
(sub-)expression is replaced by the result of calling `replacement`
while passing any wildcards from `pattern` as keyword arguments. If
`replacement` raises :exc:`.CannotSimplify`, it will be ignored
Note:
Instead of or in addition to passing `rules`, `simplify` can often be
combined with e.g. `extra_rules` / `extra_binary_rules` context
managers. If a simplification can be handled through these context
managers, this is usually more efficient than an equivalent rule.
However, both really are complementary: the rules defined in the
context managers are applied *before* instantiation (hence these these
patterns are instantiated through `pattern_head`). In contrast, the
patterns defined in `rules` are applied against instantiated
expressions.
### Response:
def _apply_rules(expr, rules):
"""Recursively re-instantiate the expression, while applying all of the
given `rules` to all encountered (sub-) expressions
Args:
expr: Any Expression or scalar object
rules (list, ~collections.OrderedDict): A list of rules, or a dictionary
mapping names to rules, where each rule is a tuple ``(pattern,
replacement)`` where `pattern` is an instance of :class:`.Pattern`)
and `replacement` is a callable. The pattern will be matched
against any expression that is encountered during the
re-instantiation. If the `pattern` matches, then the
(sub-)expression is replaced by the result of calling `replacement`
while passing any wildcards from `pattern` as keyword arguments. If
`replacement` raises :exc:`.CannotSimplify`, it will be ignored
Note:
Instead of or in addition to passing `rules`, `simplify` can often be
combined with e.g. `extra_rules` / `extra_binary_rules` context
managers. If a simplification can be handled through these context
managers, this is usually more efficient than an equivalent rule.
However, both really are complementary: the rules defined in the
context managers are applied *before* instantiation (hence these
patterns are instantiated through `pattern_head`). In contrast, the
patterns defined in `rules` are applied against instantiated
expressions.
"""
if LOG:
logger = logging.getLogger('QNET.create')
stack = []
path = []
if isinstance(expr, Expression):
stack.append(ProtoExpr.from_expr(expr))
path.append(0)
if LOG:
logger.debug(
"Starting at level 1: placing expr on stack: %s", expr)
while True:
i = path[-1]
try:
arg = stack[-1][i]
if LOG:
logger.debug(
"At level %d: considering arg %d: %s",
len(stack), i+1, arg)
except IndexError:
# done at this level
path.pop()
expr = stack.pop().instantiate()
expr = _apply_rules_no_recurse(expr, rules)
if len(stack) == 0:
if LOG:
logger.debug(
"Complete level 1: returning simplified expr: %s",
expr)
return expr
else:
stack[-1][path[-1]] = expr
path[-1] += 1
if LOG:
logger.debug(
"Complete level %d. At level %d, setting arg %d "
"to simplified expr: %s", len(stack)+1, len(stack),
path[-1], expr)
else:
if isinstance(arg, Expression):
stack.append(ProtoExpr.from_expr(arg))
path.append(0)
if LOG:
logger.debug(" placing arg on stack")
else: # scalar
stack[-1][i] = _apply_rules_no_recurse(arg, rules)
if LOG:
logger.debug(
" arg is leaf, replacing with simplified expr: "
"%s", stack[-1][i])
path[-1] += 1
else:
return _apply_rules_no_recurse(expr, rules) |
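The stack/path bookkeeping above amounts to a post-order rewrite: arguments are simplified before their parent is re-instantiated and matched against the rules. A toy, self-contained illustration of that idea follows; it deliberately uses plain tuples and `(predicate, replacement)` pairs rather than QNET's `Pattern` API.

```python
# Toy illustration of the same bottom-up rewrite idea (not QNET's API).
def rewrite(expr, rules):
    if isinstance(expr, tuple):                       # non-leaf: simplify args first
        expr = tuple(rewrite(arg, rules) for arg in expr)
    for matches, replace in rules:                    # then try each rule on the node
        if matches(expr):
            return replace(expr)
    return expr

# Rule: ('add', x, 0) -> x
rules = [(lambda e: isinstance(e, tuple) and e[0] == 'add' and e[2] == 0,
          lambda e: e[1])]
print(rewrite(('add', ('add', 'x', 0), 0), rules))    # prints: x
```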
def find_proc_date(header):
"""Search the HISTORY fields of a header looking for the FLIPS
processing date.
"""
import re
for h in header.ascardlist():
if h.key=="HISTORY":
g=h.value
if 'FLIPS 1.0' in g:
result=re.search(r'imred: FLIPS 1.0 - \S{3} (.*) - ([\s\d]\d:\d\d:\d\d)\s*$',g)
if result:
date=result.group(1)
time=result.group(2)
datetime=date+" "+time
return datetime
return None | Search the HISTORY fields of a header looking for the FLIPS
processing date. | Below is the instruction that describes the task:
### Input:
Search the HISTORY fields of a header looking for the FLIPS
processing date.
### Response:
def find_proc_date(header):
"""Search the HISTORY fields of a header looking for the FLIPS
processing date.
"""
import re
for h in header.ascardlist():
if h.key=="HISTORY":
g=h.value
if 'FLIPS 1.0' in g:
result=re.search(r'imred: FLIPS 1.0 - \S{3} (.*) - ([\s\d]\d:\d\d:\d\d)\s*$',g)
if result:
date=result.group(1)
time=result.group(2)
datetime=date+" "+time
return datetime
return None |
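A hedged usage sketch: the function expects the legacy pyfits header API (`ascardlist()`), so an old pyfits installation and the file name below are assumptions.

```python
# Hedged sketch: assumes the legacy pyfits API that still provides
# Header.ascardlist(); 'image.fits' is an illustrative file name.
import pyfits

hdulist = pyfits.open('image.fits')
when = find_proc_date(hdulist[0].header)
print(when if when else 'no FLIPS processing entry found')
hdulist.close()
```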
def phids(self, *phids):
"""Retrieve data about PHIDs.
:params phids: list of PHIDs
"""
params = {
self.PHIDS: phids
}
response = self._call(self.PHAB_PHIDS, params)
return response | Retrieve data about PHIDs.
:params phids: list of PHIDs | Below is the instruction that describes the task:
### Input:
Retrieve data about PHIDs.
:params phids: list of PHIDs
### Response:
def phids(self, *phids):
"""Retrieve data about PHIDs.
:params phids: list of PHIDs
"""
params = {
self.PHIDS: phids
}
response = self._call(self.PHAB_PHIDS, params)
return response |
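A hedged call sketch; `conduit` stands in for an already-constructed instance of the client class this method belongs to, and the PHIDs shown are made up.

```python
# Hedged sketch: 'conduit' is a placeholder for an instance of the class
# that defines phids(); the PHIDs below are illustrative.
info = conduit.phids('PHID-TASK-aaaabbbb', 'PHID-USER-ccccdddd')
print(info)
```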
def create_project(self, name, client_id, budget = None, budget_by =
'none', notes = None, billable = True):
'''Creates a Project with the given information.'''
project = {'project':{
'name': name,
'client_id': client_id,
'budget_by': budget_by,
'budget': budget,
'notes': notes,
'billable': billable,
}}
response = self.post_request('projects/', project, follow = True)
if response:
return Project(self, response['project']) | Creates a Project with the given information. | Below is the instruction that describes the task:
### Input:
Creates a Project with the given information.
### Response:
def create_project(self, name, client_id, budget = None, budget_by =
'none', notes = None, billable = True):
'''Creates a Project with the given information.'''
project = {'project':{
'name': name,
'client_id': client_id,
'budget_by': budget_by,
'budget': budget,
'notes': notes,
'billable': billable,
}}
response = self.post_request('projects/', project, follow = True)
if response:
return Project(self, response['project']) |
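A hedged sketch of a call; `harvest` stands for an already-authenticated instance of the wrapper class this method belongs to, and the client id and other values are illustrative.

```python
# Hedged sketch: 'harvest' is a placeholder for an authenticated instance
# of the wrapper class; client_id and the budget values are made up.
project = harvest.create_project(
    name='Website redesign',
    client_id=1234567,
    budget=120,
    budget_by='project',
    notes='Q3 engagement',
)
print(project)
```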
def compcor_variance_plot(metadata_files, metadata_sources=None,
output_file=None, varexp_thresh=(0.5, 0.7, 0.9),
fig=None):
"""
Parameters
----------
metadata_files: list
List of paths to files containing component metadata. If more than one
decomposition has been performed (e.g., anatomical and temporal
CompCor decompositions), then all metadata files can be provided in
the list. However, each metadata file should have a corresponding
entry in `metadata_sources`.
metadata_sources: list or None
List of source names (e.g., ['aCompCor']) for decompositions. This
list should be of the same length as `metadata_files`.
output_file: str or None
Path where the output figure should be saved. If this is not defined,
then the plotting axes will be returned instead of the saved figure
path.
varexp_thresh: tuple
Set of variance thresholds to include in the plot (default 0.5, 0.7,
0.9).
fig: figure or None
Existing figure on which to plot.
Returns
-------
ax: axes
Plotting axes. Returned only if the `output_file` parameter is None.
output_file: str
The file where the figure is saved.
"""
metadata = {}
if metadata_sources is None:
if len(metadata_files) == 1:
metadata_sources = ['CompCor']
else:
metadata_sources = ['Decomposition {:d}'.format(i)
for i in range(len(metadata_files))]
for file, source in zip(metadata_files, metadata_sources):
metadata[source] = pd.read_table(str(file))
metadata[source]['source'] = source
metadata = pd.concat(list(metadata.values()))
bbox_txt = {
'boxstyle': 'round',
'fc': 'white',
'ec': 'none',
'color': 'none',
'linewidth': 0,
'alpha': 0.8
}
decompositions = []
data_sources = list(metadata.groupby(['source', 'mask']).groups.keys())
for source, mask in data_sources:
if not np.isnan(
metadata.loc[
(metadata['source'] == source)
& (metadata['mask'] == mask)
]['singular_value'].values[0]):
decompositions.append((source, mask))
if fig is not None:
ax = [fig.add_subplot(1, len(decompositions), i+1)
for i in range(len(decompositions))]
elif len(decompositions) > 1:
fig, ax = plt.subplots(1, len(decompositions),
figsize=(5*len(decompositions), 5))
else:
ax = [plt.axes()]
for m, (source, mask) in enumerate(decompositions):
components = metadata[(metadata['mask'] == mask)
& (metadata['source'] == source)]
if len([m for s, m in decompositions if s == source]) > 1:
title_mask = ' ({} mask)'.format(mask)
else:
title_mask = ''
fig_title = '{}{}'.format(source, title_mask)
ax[m].plot(np.arange(components.shape[0]+1),
[0] + list(
100*components['cumulative_variance_explained']),
color='purple',
linewidth=2.5)
ax[m].grid(False)
ax[m].set_xlabel('number of components in model')
ax[m].set_ylabel('cumulative variance explained (%)')
ax[m].set_title(fig_title)
varexp = {}
for i, thr in enumerate(varexp_thresh):
varexp[thr] = np.searchsorted(
components['cumulative_variance_explained'], thr) + 1
ax[m].axhline(y=100*thr, color='lightgrey', linewidth=0.25)
ax[m].axvline(x=varexp[thr], color='C{}'.format(i),
linewidth=2, linestyle=':')
ax[m].text(0, 100*thr, '{:.0f}'.format(100*thr),
fontsize='x-small', bbox=bbox_txt)
ax[m].text(varexp[thr][0], 25,
'{} components explain\n{:.0f}% of variance'.format(
varexp[thr][0], 100*thr),
rotation=90,
horizontalalignment='center',
fontsize='xx-small',
bbox=bbox_txt)
ax[m].set_yticks([])
ax[m].set_yticklabels([])
for tick in ax[m].xaxis.get_major_ticks():
tick.label.set_fontsize('x-small')
tick.label.set_rotation('vertical')
for side in ['top', 'right', 'left']:
ax[m].spines[side].set_color('none')
ax[m].spines[side].set_visible(False)
if output_file is not None:
figure = plt.gcf()
figure.savefig(output_file, bbox_inches='tight')
plt.close(figure)
figure = None
return output_file
return ax | Parameters
----------
metadata_files: list
List of paths to files containing component metadata. If more than one
decomposition has been performed (e.g., anatomical and temporal
CompCor decompositions), then all metadata files can be provided in
the list. However, each metadata file should have a corresponding
entry in `metadata_sources`.
metadata_sources: list or None
List of source names (e.g., ['aCompCor']) for decompositions. This
list should be of the same length as `metadata_files`.
output_file: str or None
Path where the output figure should be saved. If this is not defined,
then the plotting axes will be returned instead of the saved figure
path.
varexp_thresh: tuple
Set of variance thresholds to include in the plot (default 0.5, 0.7,
0.9).
fig: figure or None
Existing figure on which to plot.
Returns
-------
ax: axes
Plotting axes. Returned only if the `output_file` parameter is None.
output_file: str
The file where the figure is saved. | Below is the instruction that describes the task:
### Input:
Parameters
----------
metadata_files: list
List of paths to files containing component metadata. If more than one
decomposition has been performed (e.g., anatomical and temporal
CompCor decompositions), then all metadata files can be provided in
the list. However, each metadata file should have a corresponding
entry in `metadata_sources`.
metadata_sources: list or None
List of source names (e.g., ['aCompCor']) for decompositions. This
list should be of the same length as `metadata_files`.
output_file: str or None
Path where the output figure should be saved. If this is not defined,
then the plotting axes will be returned instead of the saved figure
path.
varexp_thresh: tuple
Set of variance thresholds to include in the plot (default 0.5, 0.7,
0.9).
fig: figure or None
Existing figure on which to plot.
Returns
-------
ax: axes
Plotting axes. Returned only if the `output_file` parameter is None.
output_file: str
The file where the figure is saved.
### Response:
def compcor_variance_plot(metadata_files, metadata_sources=None,
output_file=None, varexp_thresh=(0.5, 0.7, 0.9),
fig=None):
"""
Parameters
----------
metadata_files: list
List of paths to files containing component metadata. If more than one
decomposition has been performed (e.g., anatomical and temporal
CompCor decompositions), then all metadata files can be provided in
the list. However, each metadata file should have a corresponding
entry in `metadata_sources`.
metadata_sources: list or None
List of source names (e.g., ['aCompCor']) for decompositions. This
list should be of the same length as `metadata_files`.
output_file: str or None
Path where the output figure should be saved. If this is not defined,
then the plotting axes will be returned instead of the saved figure
path.
varexp_thresh: tuple
Set of variance thresholds to include in the plot (default 0.5, 0.7,
0.9).
fig: figure or None
Existing figure on which to plot.
Returns
-------
ax: axes
Plotting axes. Returned only if the `output_file` parameter is None.
output_file: str
The file where the figure is saved.
"""
metadata = {}
if metadata_sources is None:
if len(metadata_files) == 1:
metadata_sources = ['CompCor']
else:
metadata_sources = ['Decomposition {:d}'.format(i)
for i in range(len(metadata_files))]
for file, source in zip(metadata_files, metadata_sources):
metadata[source] = pd.read_table(str(file))
metadata[source]['source'] = source
metadata = pd.concat(list(metadata.values()))
bbox_txt = {
'boxstyle': 'round',
'fc': 'white',
'ec': 'none',
'color': 'none',
'linewidth': 0,
'alpha': 0.8
}
decompositions = []
data_sources = list(metadata.groupby(['source', 'mask']).groups.keys())
for source, mask in data_sources:
if not np.isnan(
metadata.loc[
(metadata['source'] == source)
& (metadata['mask'] == mask)
]['singular_value'].values[0]):
decompositions.append((source, mask))
if fig is not None:
ax = [fig.add_subplot(1, len(decompositions), i+1)
for i in range(len(decompositions))]
elif len(decompositions) > 1:
fig, ax = plt.subplots(1, len(decompositions),
figsize=(5*len(decompositions), 5))
else:
ax = [plt.axes()]
for m, (source, mask) in enumerate(decompositions):
components = metadata[(metadata['mask'] == mask)
& (metadata['source'] == source)]
if len([m for s, m in decompositions if s == source]) > 1:
title_mask = ' ({} mask)'.format(mask)
else:
title_mask = ''
fig_title = '{}{}'.format(source, title_mask)
ax[m].plot(np.arange(components.shape[0]+1),
[0] + list(
100*components['cumulative_variance_explained']),
color='purple',
linewidth=2.5)
ax[m].grid(False)
ax[m].set_xlabel('number of components in model')
ax[m].set_ylabel('cumulative variance explained (%)')
ax[m].set_title(fig_title)
varexp = {}
for i, thr in enumerate(varexp_thresh):
varexp[thr] = np.searchsorted(
components['cumulative_variance_explained'], thr) + 1
ax[m].axhline(y=100*thr, color='lightgrey', linewidth=0.25)
ax[m].axvline(x=varexp[thr], color='C{}'.format(i),
linewidth=2, linestyle=':')
ax[m].text(0, 100*thr, '{:.0f}'.format(100*thr),
fontsize='x-small', bbox=bbox_txt)
ax[m].text(varexp[thr][0], 25,
'{} components explain\n{:.0f}% of variance'.format(
varexp[thr][0], 100*thr),
rotation=90,
horizontalalignment='center',
fontsize='xx-small',
bbox=bbox_txt)
ax[m].set_yticks([])
ax[m].set_yticklabels([])
for tick in ax[m].xaxis.get_major_ticks():
tick.label.set_fontsize('x-small')
tick.label.set_rotation('vertical')
for side in ['top', 'right', 'left']:
ax[m].spines[side].set_color('none')
ax[m].spines[side].set_visible(False)
if output_file is not None:
figure = plt.gcf()
figure.savefig(output_file, bbox_inches='tight')
plt.close(figure)
figure = None
return output_file
return ax |
def print_profile_info(org_vm, profile_instance):
"""
Print information on a profile defined by profile_instance.
Parameters:
org_vm: The value mapping for CIM_RegisteredProfile and
RegisteredOrganization so that the value and not value mapping
is displayed.
profile_instance: instance of a profile to be printed
"""
org = org_vm.tovalues(profile_instance['RegisteredOrganization'])
name = profile_instance['RegisteredName']
vers = profile_instance['RegisteredVersion']
print(" %s %s Profile %s" % (org, name, vers)) | Print information on a profile defined by profile_instance.
Parameters:
org_vm: The value mapping for CIM_RegisteredProfile and
RegisteredOrganization so that the value and not value mapping
is displayed.
profile_instance: instance of a profile to be printed | Below is the instruction that describes the task:
### Input:
Print information on a profile defined by profile_instance.
Parameters:
org_vm: The value mapping for CIM_RegisteredProfile and
RegisteredOrganization so that the value and not value mapping
is displayed.
profile_instance: instance of a profile to be printed
### Response:
def print_profile_info(org_vm, profile_instance):
"""
Print information on a profile defined by profile_instance.
Parameters:
org_vm: The value mapping for CIM_RegisteredProfile and
RegisteredOrganization so that the value and not value mapping
is displayed.
profile_instance: instance of a profile to be printed
"""
org = org_vm.tovalues(profile_instance['RegisteredOrganization'])
name = profile_instance['RegisteredName']
vers = profile_instance['RegisteredVersion']
print(" %s %s Profile %s" % (org, name, vers)) |
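A hedged sketch of driving this helper with pywbem: it assumes a WBEM server exposing CIM_RegisteredProfile in the interop namespace, and the URL, credentials, and namespace are illustrative.

```python
# Hedged sketch: assumes pywbem and a server implementing the Profile
# Registration profile; URL/credentials/namespace are illustrative.
import pywbem

conn = pywbem.WBEMConnection('https://cimom.example.com', ('user', 'password'))
org_vm = pywbem.ValueMapping.for_property(
    conn, 'root/interop', 'CIM_RegisteredProfile', 'RegisteredOrganization')
for inst in conn.EnumerateInstances('CIM_RegisteredProfile',
                                    namespace='root/interop'):
    print_profile_info(org_vm, inst)
```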
def load_exchange_word_vectors(
filename = "database.db",
maximum_number_of_events = None
):
"""
Load exchange data and return dataset.
"""
log.info("load word vectors of database {filename}".format(
filename = filename
))
# Ensure that the database exists.
if not os.path.isfile(filename):
log.info("database {filename} nonexistent".format(
filename = filename
))
program.terminate()
raise Exception
# Access the database.
database = access_database(filename = filename)
# Access or create the exchanges table.
table_exchanges = database["exchanges"]
# Access exchanges.
table_name = "exchanges"
# Create a datavision dataset.
data = datavision.Dataset()
# progress
progress = shijian.Progress()
progress.engage_quick_calculation_mode()
number_of_entries = len(database[table_name])
index = 0
for index_entry, entry in enumerate(database[table_name].all()):
if maximum_number_of_events is not None and\
index >= int(maximum_number_of_events):
log.info(
"loaded maximum requested number of events " +
"({maximum_number_of_events})\r".format(
maximum_number_of_events = maximum_number_of_events
)
)
break
#unique_identifier = str(entry["id"])
utteranceWordVector = str(entry["utteranceWordVector"])
responseWordVector = str(entry["responseWordVector"])
if utteranceWordVector != "None" and responseWordVector != "None":
index += 1
utteranceWordVector = eval("np." + utteranceWordVector.replace("float32", "np.float32"))
responseWordVector = eval("np." + responseWordVector.replace("float32", "np.float32"))
data.variable(index = index, name = "utteranceWordVector", value = utteranceWordVector)
data.variable(index = index, name = "responseWordVector", value = responseWordVector )
#utteranceWordVector = list(eval("np." + utteranceWordVector.replace("float32", "np.float32")))
#responseWordVector = list(eval("np." + responseWordVector.replace("float32", "np.float32")))
#for index_component, component in enumerate(utteranceWordVector):
# data.variable(index = index, name = "uwv" + str(index_component), value = component)
#for index_component, component in enumerate(responseWordVector):
# data.variable(index = index, name = "rwv" + str(index_component), value = component)
print(progress.add_datum(fraction = index_entry / number_of_entries), end="")
return data | Load exchange data and return dataset. | Below is the instruction that describes the task:
### Input:
Load exchange data and return dataset.
### Response:
def load_exchange_word_vectors(
filename = "database.db",
maximum_number_of_events = None
):
"""
Load exchange data and return dataset.
"""
log.info("load word vectors of database {filename}".format(
filename = filename
))
# Ensure that the database exists.
if not os.path.isfile(filename):
log.info("database {filename} nonexistent".format(
filename = filename
))
program.terminate()
raise Exception
# Access the database.
database = access_database(filename = filename)
# Access or create the exchanges table.
table_exchanges = database["exchanges"]
# Access exchanges.
table_name = "exchanges"
# Create a datavision dataset.
data = datavision.Dataset()
# progress
progress = shijian.Progress()
progress.engage_quick_calculation_mode()
number_of_entries = len(database[table_name])
index = 0
for index_entry, entry in enumerate(database[table_name].all()):
if maximum_number_of_events is not None and\
index >= int(maximum_number_of_events):
log.info(
"loaded maximum requested number of events " +
"({maximum_number_of_events})\r".format(
maximum_number_of_events = maximum_number_of_events
)
)
break
#unique_identifier = str(entry["id"])
utteranceWordVector = str(entry["utteranceWordVector"])
responseWordVector = str(entry["responseWordVector"])
if utteranceWordVector != "None" and responseWordVector != "None":
index += 1
utteranceWordVector = eval("np." + utteranceWordVector.replace("float32", "np.float32"))
responseWordVector = eval("np." + responseWordVector.replace("float32", "np.float32"))
data.variable(index = index, name = "utteranceWordVector", value = utteranceWordVector)
data.variable(index = index, name = "responseWordVector", value = responseWordVector )
#utteranceWordVector = list(eval("np." + utteranceWordVector.replace("float32", "np.float32")))
#responseWordVector = list(eval("np." + responseWordVector.replace("float32", "np.float32")))
#for index_component, component in enumerate(utteranceWordVector):
# data.variable(index = index, name = "uwv" + str(index_component), value = component)
#for index_component, component in enumerate(responseWordVector):
# data.variable(index = index, name = "rwv" + str(index_component), value = component)
print(progress.add_datum(fraction = index_entry / number_of_entries), end="")
return data |
def band_path(self, band_id, for_gdal=False, absolute=False):
"""Return paths of given band's jp2 files for all granules."""
band_id = str(band_id).zfill(2)
if not isinstance(band_id, str) or band_id not in BAND_IDS:
raise ValueError("band ID not valid: %s" % band_id)
if self.dataset.is_zip and for_gdal:
zip_prefix = "/vsizip/"
if absolute:
granule_basepath = zip_prefix + os.path.dirname(os.path.join(
self.dataset.path,
self.dataset.product_metadata_path
))
else:
granule_basepath = zip_prefix + os.path.dirname(
self.dataset.product_metadata_path
)
else:
if absolute:
granule_basepath = os.path.dirname(os.path.join(
self.dataset.path,
self.dataset.product_metadata_path
))
else:
granule_basepath = os.path.dirname(
self.dataset.product_metadata_path
)
product_org = next(self.dataset._product_metadata.iter("Product_Organisation"))
granule_item = [
g
for g in chain(*[gl for gl in product_org.iter("Granule_List")])
if self.granule_identifier == g.attrib["granuleIdentifier"]
]
if len(granule_item) != 1:
raise S2ReaderMetadataError(
"Granule ID cannot be found in product metadata."
)
rel_path = [
f.text for f in granule_item[0].iter() if f.text[-2:] == band_id
]
if len(rel_path) != 1:
# Apparently some SAFE files don't contain all bands. In such a
# case, raise a warning and return None.
warnings.warn(
"%s: image path to band %s could not be extracted" % (
self.dataset.path, band_id
)
)
return
img_path = os.path.join(granule_basepath, rel_path[0]) + ".jp2"
# Above solution still fails on the "safe" test dataset. Therefore,
# the path gets checked if it contains the IMG_DATA folder and if not,
# try to guess the path from the old schema. Not happy with this but
# couldn't find a better way yet.
if "IMG_DATA" in img_path:
return img_path
else:
if self.dataset.is_zip:
zip_prefix = "/vsizip/"
granule_basepath = zip_prefix + os.path.join(
self.dataset.path, self.granule_path)
else:
granule_basepath = self.granule_path
return os.path.join(
os.path.join(granule_basepath, "IMG_DATA"),
"".join([
"_".join((self.granule_identifier).split("_")[:-1]),
"_B",
band_id,
".jp2"
])
) | Return paths of given band's jp2 files for all granules. | Below is the instruction that describes the task:
### Input:
Return paths of given band's jp2 files for all granules.
### Response:
def band_path(self, band_id, for_gdal=False, absolute=False):
"""Return paths of given band's jp2 files for all granules."""
band_id = str(band_id).zfill(2)
if not isinstance(band_id, str) or band_id not in BAND_IDS:
raise ValueError("band ID not valid: %s" % band_id)
if self.dataset.is_zip and for_gdal:
zip_prefix = "/vsizip/"
if absolute:
granule_basepath = zip_prefix + os.path.dirname(os.path.join(
self.dataset.path,
self.dataset.product_metadata_path
))
else:
granule_basepath = zip_prefix + os.path.dirname(
self.dataset.product_metadata_path
)
else:
if absolute:
granule_basepath = os.path.dirname(os.path.join(
self.dataset.path,
self.dataset.product_metadata_path
))
else:
granule_basepath = os.path.dirname(
self.dataset.product_metadata_path
)
product_org = next(self.dataset._product_metadata.iter("Product_Organisation"))
granule_item = [
g
for g in chain(*[gl for gl in product_org.iter("Granule_List")])
if self.granule_identifier == g.attrib["granuleIdentifier"]
]
if len(granule_item) != 1:
raise S2ReaderMetadataError(
"Granule ID cannot be found in product metadata."
)
rel_path = [
f.text for f in granule_item[0].iter() if f.text[-2:] == band_id
]
if len(rel_path) != 1:
# Apparently some SAFE files don't contain all bands. In such a
# case, raise a warning and return None.
warnings.warn(
"%s: image path to band %s could not be extracted" % (
self.dataset.path, band_id
)
)
return
img_path = os.path.join(granule_basepath, rel_path[0]) + ".jp2"
# Above solution still fails on the "safe" test dataset. Therefore,
# the path gets checked if it contains the IMG_DATA folder and if not,
# try to guess the path from the old schema. Not happy with this but
# couldn't find a better way yet.
if "IMG_DATA" in img_path:
return img_path
else:
if self.dataset.is_zip:
zip_prefix = "/vsizip/"
granule_basepath = zip_prefix + os.path.join(
self.dataset.path, self.granule_path)
else:
granule_basepath = self.granule_path
return os.path.join(
os.path.join(granule_basepath, "IMG_DATA"),
"".join([
"_".join((self.granule_identifier).split("_")[:-1]),
"_B",
band_id,
".jp2"
])
) |
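A hedged usage sketch through the s2reader package this method appears to belong to; the SAFE archive path is illustrative.

```python
# Hedged sketch: assumes the s2reader package layout; the SAFE path is
# illustrative only.
import s2reader

with s2reader.open('S2A_MSIL1C_example.SAFE') as safe:
    for granule in safe.granules:
        print(granule.granule_identifier)
        print(granule.band_path('08', absolute=True))
```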
def _ctab_property_block(stream):
"""Process properties block of ``Ctab``.
:param stream: Queue containing lines of text.
:type stream: :py:class:`collections.deque`
:return: Tuples of data.
:rtype: :class:`~ctfile.tokenizer.CtabPropertiesBlockLine`
"""
line = stream.popleft()
while line != 'M END':
name = line.split()[1]
yield CtabPropertiesBlockLine(name, line)
line = stream.popleft() | Process properties block of ``Ctab``.
:param stream: Queue containing lines of text.
:type stream: :py:class:`collections.deque`
:return: Tuples of data.
:rtype: :class:`~ctfile.tokenizer.CtabPropertiesBlockLine` | Below is the instruction that describes the task:
### Input:
Process properties block of ``Ctab``.
:param stream: Queue containing lines of text.
:type stream: :py:class:`collections.deque`
:return: Tuples of data.
:rtype: :class:`~ctfile.tokenizer.CtabPropertiesBlockLine`
### Response:
def _ctab_property_block(stream):
"""Process properties block of ``Ctab``.
:param stream: Queue containing lines of text.
:type stream: :py:class:`collections.deque`
:return: Tuples of data.
:rtype: :class:`~ctfile.tokenizer.CtabPropertiesBlockLine`
"""
line = stream.popleft()
while line != 'M END':
name = line.split()[1]
yield CtabPropertiesBlockLine(name, line)
line = stream.popleft() |
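A small runnable illustration with a synthetic "M  ..." properties section; the exact fields of the yielded objects follow whatever the module's `CtabPropertiesBlockLine` defines, so the printed form is only indicative.

```python
# Runnable illustration with a tiny synthetic Ctab properties block;
# the printed repr depends on the module's CtabPropertiesBlockLine.
from collections import deque

stream = deque(['M  CHG  1   3   1', 'M  ISO  1   2  13', 'M END'])
for prop in _ctab_property_block(stream):
    print(prop)   # e.g. CtabPropertiesBlockLine(name='CHG', line='M  CHG  1   3   1')
```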
def _check_command_result(self):
"""If command result exists run these checks."""
if self.commandResult.startswith('/bin/bash'):
raise UtilError('%s' % self.commandResult.split(' ', 1)[1])
if self.commandResult.startswith('/bin/mv'):
raise UtilError('%s' % self.commandResult.split(' ', 1)[1])
if self.commandResult.startswith('/bin/ls'):
raise UtilError('%s' % self.commandResult.split(' ', 1)[1])
if self.commandResult.startswith('/bin/rm'):
raise UtilError('%s' % self.commandResult.split(' ', 1)[1])
if 'invalid option' in self.commandResult:
raise UtilError('%s' % self.commandResult)
if 'Invalid option' in self.commandResult:
raise UtilError('%s' % self.commandResult)
if 'usage: /usr/bin/get_dossier' in self.commandResult:
raise UtilError('%s' % self.commandResult) | If command result exists run these checks. | Below is the instruction that describes the task:
### Input:
If command result exists run these checks.
### Response:
def _check_command_result(self):
"""If command result exists run these checks."""
if self.commandResult.startswith('/bin/bash'):
raise UtilError('%s' % self.commandResult.split(' ', 1)[1])
if self.commandResult.startswith('/bin/mv'):
raise UtilError('%s' % self.commandResult.split(' ', 1)[1])
if self.commandResult.startswith('/bin/ls'):
raise UtilError('%s' % self.commandResult.split(' ', 1)[1])
if self.commandResult.startswith('/bin/rm'):
raise UtilError('%s' % self.commandResult.split(' ', 1)[1])
if 'invalid option' in self.commandResult:
raise UtilError('%s' % self.commandResult)
if 'Invalid option' in self.commandResult:
raise UtilError('%s' % self.commandResult)
if 'usage: /usr/bin/get_dossier' in self.commandResult:
raise UtilError('%s' % self.commandResult) |
def validate(self, vat_deets):
"""Validates an existing VAT identification number against VIES."""
request = self._get('validation', vat_deets)
return self.responder(request) | Validates an existing VAT identification number against VIES. | Below is the instruction that describes the task:
### Input:
Validates an existing VAT identification number against VIES.
### Response:
def validate(self, vat_deets):
"""Validates an existing VAT identification number against VIES."""
request = self._get('validation', vat_deets)
return self.responder(request) |
def _style_text(text):
"""
Apply some HTML highlighting to the contents.
This can't be done in the
"""
# Escape text and apply some formatting.
# To have really good highlighting, pprint would have to be re-implemented.
text = escape(text)
text = text.replace(' <iterator object>', " <small><<var>this object can be used in a 'for' loop</var>></small>")
text = text.replace(' <dynamic item>', ' <small><<var>this object may have extra field names</var>></small>')
text = text.replace(' <dynamic attribute>', ' <small><<var>this object may have extra field names</var>></small>')
text = RE_PROXY.sub('\g<1><small><<var>proxy object</var>></small>', text)
text = RE_FUNCTION.sub('\g<1><small><<var>object method</var>></small>', text)
text = RE_GENERATOR.sub("\g<1><small><<var>generator, use 'for' to traverse it</var>></small>", text)
text = RE_OBJECT_ADDRESS.sub('\g<1><small><<var>\g<2> object</var>></small>', text)
text = RE_MANAGER.sub('\g<1><small><<var>manager, use <kbd>.all</kbd> to traverse it</var>></small>', text)
text = RE_CLASS_REPR.sub('\g<1><small><<var>\g<2> class</var>></small>', text)
# Since Django's WSGIRequest does a pprint like format for it's __repr__, make that styling consistent
text = RE_REQUEST_FIELDNAME.sub('\g<1>:\n <strong style="color: #222;">\g<2></strong>: ', text)
text = RE_REQUEST_CLEANUP1.sub('\g<1>', text)
text = RE_REQUEST_CLEANUP2.sub(')', text)
return mark_safe(text) | Apply some HTML highlighting to the contents.
This can't be done in the | Below is the instruction that describes the task:
### Input:
Apply some HTML highlighting to the contents.
This can't be done in the
### Response:
def _style_text(text):
"""
Apply some HTML highlighting to the contents.
This can't be done in the
"""
# Escape text and apply some formatting.
# To have really good highlighting, pprint would have to be re-implemented.
text = escape(text)
text = text.replace(' <iterator object>', " <small><<var>this object can be used in a 'for' loop</var>></small>")
text = text.replace(' <dynamic item>', ' <small><<var>this object may have extra field names</var>></small>')
text = text.replace(' <dynamic attribute>', ' <small><<var>this object may have extra field names</var>></small>')
text = RE_PROXY.sub('\g<1><small><<var>proxy object</var>></small>', text)
text = RE_FUNCTION.sub('\g<1><small><<var>object method</var>></small>', text)
text = RE_GENERATOR.sub("\g<1><small><<var>generator, use 'for' to traverse it</var>></small>", text)
text = RE_OBJECT_ADDRESS.sub('\g<1><small><<var>\g<2> object</var>></small>', text)
text = RE_MANAGER.sub('\g<1><small><<var>manager, use <kbd>.all</kbd> to traverse it</var>></small>', text)
text = RE_CLASS_REPR.sub('\g<1><small><<var>\g<2> class</var>></small>', text)
# Since Django's WSGIRequest does a pprint like format for it's __repr__, make that styling consistent
text = RE_REQUEST_FIELDNAME.sub('\g<1>:\n <strong style="color: #222;">\g<2></strong>: ', text)
text = RE_REQUEST_CLEANUP1.sub('\g<1>', text)
text = RE_REQUEST_CLEANUP2.sub(')', text)
return mark_safe(text) |
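A toy, self-contained version of the escape-then-highlight pattern used above, with one illustrative regex standing in for the module's `RE_*` constants; Django is assumed to be installed for `escape()`/`mark_safe()`.

```python
# Toy sketch of the escape-then-highlight pattern; the regex here is
# illustrative, not one of the module's RE_* constants.
import re
from django.utils.html import escape
from django.utils.safestring import mark_safe

def style(text):
    text = escape(text)
    text = re.sub(r'&lt;(\w+) object at 0x[0-9a-f]+&gt;',
                  r'<small>&lt;<var>\1 object</var>&gt;</small>', text)
    return mark_safe(text)

print(style('<Thing object at 0x7f3a2c>'))
```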
def nvmlDeviceSetDefaultAutoBoostedClocksEnabled(handle, enabled, flags):
r"""
/**
* Try to set the default state of Auto Boosted clocks on a device. This is the default state that Auto Boosted clocks will
* return to when no compute running processes (e.g. CUDA application which have an active context) are running
*
* For Kepler &tm; or newer non-GeForce fully supported devices and Maxwell or newer GeForce devices.
* Requires root/admin permissions.
*
* Auto Boosted clocks are enabled by default on some hardware, allowing the GPU to run at higher clock rates
* to maximize performance as thermal limits allow. Auto Boosted clocks should be disabled if fixed clock
* rates are desired.
*
* On Pascal and newer hardware, Auto Boosted clocks are controlled through application clocks.
* Use \ref nvmlDeviceSetApplicationsClocks and \ref nvmlDeviceResetApplicationsClocks to control Auto Boost
* behavior.
*
* @param device The identifier of the target device
* @param enabled What state to try to set default Auto Boosted clocks of the target device to
* @param flags Flags that change the default behavior. Currently Unused.
*
* @return
* - \ref NVML_SUCCESS If the Auto Boosted clock's default state was successfully set to the state specified by \a enabled
* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
* - \ref NVML_ERROR_NO_PERMISSION If the calling user does not have permission to change Auto Boosted clock's default state.
* - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid
* - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support Auto Boosted clocks
* - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
* - \ref NVML_ERROR_UNKNOWN on any unexpected error
*
*/
nvmlReturn_t DECLDIR nvmlDeviceSetDefaultAutoBoostedClocksEnabled
"""
fn = _nvmlGetFunctionPointer("nvmlDeviceSetDefaultAutoBoostedClocksEnabled")
ret = fn(handle, _nvmlEnableState_t(enabled), c_uint(flags))
_nvmlCheckReturn(ret)
return None | r"""
/**
* Try to set the default state of Auto Boosted clocks on a device. This is the default state that Auto Boosted clocks will
* return to when no compute running processes (e.g. CUDA application which have an active context) are running
*
* For Kepler &tm; or newer non-GeForce fully supported devices and Maxwell or newer GeForce devices.
* Requires root/admin permissions.
*
* Auto Boosted clocks are enabled by default on some hardware, allowing the GPU to run at higher clock rates
* to maximize performance as thermal limits allow. Auto Boosted clocks should be disabled if fixed clock
* rates are desired.
*
* On Pascal and newer hardware, Auto Boosted clocks are controlled through application clocks.
* Use \ref nvmlDeviceSetApplicationsClocks and \ref nvmlDeviceResetApplicationsClocks to control Auto Boost
* behavior.
*
* @param device The identifier of the target device
* @param enabled What state to try to set default Auto Boosted clocks of the target device to
* @param flags Flags that change the default behavior. Currently Unused.
*
* @return
* - \ref NVML_SUCCESS If the Auto Boosted clock's default state was successfully set to the state specified by \a enabled
* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
* - \ref NVML_ERROR_NO_PERMISSION If the calling user does not have permission to change Auto Boosted clock's default state.
* - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid
* - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support Auto Boosted clocks
* - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
* - \ref NVML_ERROR_UNKNOWN on any unexpected error
*
*/
nvmlReturn_t DECLDIR nvmlDeviceSetDefaultAutoBoostedClocksEnabled | Below is the instruction that describes the task:
### Input:
r"""
/**
* Try to set the default state of Auto Boosted clocks on a device. This is the default state that Auto Boosted clocks will
* return to when no compute running processes (e.g. CUDA application which have an active context) are running
*
* For Kepler &tm; or newer non-GeForce fully supported devices and Maxwell or newer GeForce devices.
* Requires root/admin permissions.
*
* Auto Boosted clocks are enabled by default on some hardware, allowing the GPU to run at higher clock rates
* to maximize performance as thermal limits allow. Auto Boosted clocks should be disabled if fixed clock
* rates are desired.
*
* On Pascal and newer hardware, Auto Boosted clocks are controlled through application clocks.
* Use \ref nvmlDeviceSetApplicationsClocks and \ref nvmlDeviceResetApplicationsClocks to control Auto Boost
* behavior.
*
* @param device The identifier of the target device
* @param enabled What state to try to set default Auto Boosted clocks of the target device to
* @param flags Flags that change the default behavior. Currently Unused.
*
* @return
* - \ref NVML_SUCCESS If the Auto Boosted clock's default state was successfully set to the state specified by \a enabled
* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
* - \ref NVML_ERROR_NO_PERMISSION If the calling user does not have permission to change Auto Boosted clock's default state.
* - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid
* - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support Auto Boosted clocks
* - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
* - \ref NVML_ERROR_UNKNOWN on any unexpected error
*
*/
nvmlReturn_t DECLDIR nvmlDeviceSetDefaultAutoBoostedClocksEnabled
### Response:
def nvmlDeviceSetDefaultAutoBoostedClocksEnabled(handle, enabled, flags):
r"""
/**
* Try to set the default state of Auto Boosted clocks on a device. This is the default state that Auto Boosted clocks will
* return to when no compute running processes (e.g. CUDA application which have an active context) are running
*
* For Kepler &tm; or newer non-GeForce fully supported devices and Maxwell or newer GeForce devices.
* Requires root/admin permissions.
*
* Auto Boosted clocks are enabled by default on some hardware, allowing the GPU to run at higher clock rates
* to maximize performance as thermal limits allow. Auto Boosted clocks should be disabled if fixed clock
* rates are desired.
*
* On Pascal and newer hardware, Auto Boosted clocks are controlled through application clocks.
* Use \ref nvmlDeviceSetApplicationsClocks and \ref nvmlDeviceResetApplicationsClocks to control Auto Boost
* behavior.
*
* @param device The identifier of the target device
* @param enabled What state to try to set default Auto Boosted clocks of the target device to
* @param flags Flags that change the default behavior. Currently Unused.
*
* @return
* - \ref NVML_SUCCESS If the Auto Boosted clock's default state was successfully set to the state specified by \a enabled
* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
* - \ref NVML_ERROR_NO_PERMISSION If the calling user does not have permission to change Auto Boosted clock's default state.
* - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid
* - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support Auto Boosted clocks
* - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
* - \ref NVML_ERROR_UNKNOWN on any unexpected error
*
*/
nvmlReturn_t DECLDIR nvmlDeviceSetDefaultAutoBoostedClocksEnabled
"""
fn = _nvmlGetFunctionPointer("nvmlDeviceSetDefaultAutoBoostedClocksEnabled")
ret = fn(handle, _nvmlEnableState_t(enabled), c_uint(flags))
_nvmlCheckReturn(ret)
return None |
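A hedged usage sketch, assuming the pynvml-style bindings this wrapper ships with, an NVIDIA driver, and sufficient (root/admin) privileges.

```python
# Hedged sketch: assumes pynvml-style bindings, an NVIDIA driver and
# root/admin privileges; GPU index 0 is illustrative.
nvmlInit()
try:
    handle = nvmlDeviceGetHandleByIndex(0)
    # Disable the default Auto Boost state on GPU 0 (flags currently unused).
    nvmlDeviceSetDefaultAutoBoostedClocksEnabled(handle, NVML_FEATURE_DISABLED, 0)
finally:
    nvmlShutdown()
```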
def list_assignments(self, course_id, assignment_ids=None, bucket=None, include=None, needs_grading_count_by_section=None, override_assignment_dates=None, search_term=None):
"""
List assignments.
Returns the list of assignments for the current context.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# OPTIONAL - include
"""Associations to include with the assignment. The "assignment_visibility" option
requires that the Differentiated Assignments course feature be turned on. If
"observed_users" is passed, submissions for observed users will also be included as an array."""
if include is not None:
self._validate_enum(include, ["submission", "assignment_visibility", "all_dates", "overrides", "observed_users"])
params["include"] = include
# OPTIONAL - search_term
"""The partial title of the assignments to match and return."""
if search_term is not None:
params["search_term"] = search_term
# OPTIONAL - override_assignment_dates
"""Apply assignment overrides for each assignment, defaults to true."""
if override_assignment_dates is not None:
params["override_assignment_dates"] = override_assignment_dates
# OPTIONAL - needs_grading_count_by_section
"""Split up "needs_grading_count" by sections into the "needs_grading_count_by_section" key, defaults to false"""
if needs_grading_count_by_section is not None:
params["needs_grading_count_by_section"] = needs_grading_count_by_section
# OPTIONAL - bucket
"""If included, only return certain assignments depending on due date and submission status."""
if bucket is not None:
self._validate_enum(bucket, ["past", "overdue", "undated", "ungraded", "unsubmitted", "upcoming", "future"])
params["bucket"] = bucket
# OPTIONAL - assignment_ids
"""if set, return only assignments specified"""
if assignment_ids is not None:
params["assignment_ids"] = assignment_ids
self.logger.debug("GET /api/v1/courses/{course_id}/assignments with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/courses/{course_id}/assignments".format(**path), data=data, params=params, all_pages=True) | List assignments.
Returns the list of assignments for the current context. | Below is the instruction that describes the task:
### Input:
List assignments.
Returns the list of assignments for the current context.
### Response:
def list_assignments(self, course_id, assignment_ids=None, bucket=None, include=None, needs_grading_count_by_section=None, override_assignment_dates=None, search_term=None):
"""
List assignments.
Returns the list of assignments for the current context.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# OPTIONAL - include
"""Associations to include with the assignment. The "assignment_visibility" option
requires that the Differentiated Assignments course feature be turned on. If
"observed_users" is passed, submissions for observed users will also be included as an array."""
if include is not None:
self._validate_enum(include, ["submission", "assignment_visibility", "all_dates", "overrides", "observed_users"])
params["include"] = include
# OPTIONAL - search_term
"""The partial title of the assignments to match and return."""
if search_term is not None:
params["search_term"] = search_term
# OPTIONAL - override_assignment_dates
"""Apply assignment overrides for each assignment, defaults to true."""
if override_assignment_dates is not None:
params["override_assignment_dates"] = override_assignment_dates
# OPTIONAL - needs_grading_count_by_section
"""Split up "needs_grading_count" by sections into the "needs_grading_count_by_section" key, defaults to false"""
if needs_grading_count_by_section is not None:
params["needs_grading_count_by_section"] = needs_grading_count_by_section
# OPTIONAL - bucket
"""If included, only return certain assignments depending on due date and submission status."""
if bucket is not None:
self._validate_enum(bucket, ["past", "overdue", "undated", "ungraded", "unsubmitted", "upcoming", "future"])
params["bucket"] = bucket
# OPTIONAL - assignment_ids
"""if set, return only assignments specified"""
if assignment_ids is not None:
params["assignment_ids"] = assignment_ids
self.logger.debug("GET /api/v1/courses/{course_id}/assignments with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/courses/{course_id}/assignments".format(**path), data=data, params=params, all_pages=True) |
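A hedged call sketch; `canvas` stands in for an instance of the generated client class this method belongs to, and the course id and filters are illustrative.

```python
# Hedged sketch: 'canvas' is a placeholder for an instance of the client
# class defining list_assignments(); course_id and filters are made up.
assignments = canvas.list_assignments(
    course_id=101,
    bucket='upcoming',
    include=['all_dates'],
    search_term='essay',
)
for assignment in assignments:
    print(assignment['id'], assignment['name'])
```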
def _probvec(r, out):
"""
Fill `out` with randomly sampled probability vectors as rows.
To be compiled as a ufunc by guvectorize of Numba. The inputs must
have the same shape except the last axis; the length of the last
axis of `r` must be that of `out` minus 1, i.e., if out.shape[-1] is
k, then r.shape[-1] must be k-1.
Parameters
----------
r : ndarray(float)
Array containing random values in [0, 1).
out : ndarray(float)
Output array.
"""
n = r.shape[0]
r.sort()
out[0] = r[0]
for i in range(1, n):
out[i] = r[i] - r[i-1]
out[n] = 1 - r[n-1] | Fill `out` with randomly sampled probability vectors as rows.
To be compiled as a ufunc by guvectorize of Numba. The inputs must
have the same shape except the last axis; the length of the last
axis of `r` must be that of `out` minus 1, i.e., if out.shape[-1] is
k, then r.shape[-1] must be k-1.
Parameters
----------
r : ndarray(float)
Array containing random values in [0, 1).
out : ndarray(float)
Output array. | Below is the instruction that describes the task:
### Input:
Fill `out` with randomly sampled probability vectors as rows.
To be compiled as a ufunc by guvectorize of Numba. The inputs must
have the same shape except the last axis; the length of the last
axis of `r` must be that of `out` minus 1, i.e., if out.shape[-1] is
k, then r.shape[-1] must be k-1.
Parameters
----------
r : ndarray(float)
Array containing random values in [0, 1).
out : ndarray(float)
Output array.
### Response:
def _probvec(r, out):
"""
Fill `out` with randomly sampled probability vectors as rows.
To be compiled as a ufunc by guvectorize of Numba. The inputs must
have the same shape except the last axis; the length of the last
axis of `r` must be that of `out` minus 1, i.e., if out.shape[-1] is
k, then r.shape[-1] must be k-1.
Parameters
----------
r : ndarray(float)
Array containing random values in [0, 1).
out : ndarray(float)
Output array.
"""
n = r.shape[0]
r.sort()
out[0] = r[0]
for i in range(1, n):
out[i] = r[i] - r[i-1]
out[n] = 1 - r[n-1] |
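A runnable illustration of the same trick outside the guvectorized kernel: sorting k-1 uniforms and taking successive differences yields a point on the probability simplex.

```python
# Runnable sketch of the sorting/differencing trick the kernel implements.
import numpy as np

k = 4
r = np.sort(np.random.random(k - 1))
out = np.empty(k)
out[0] = r[0]
out[1:-1] = np.diff(r)
out[-1] = 1 - r[-1]
print(out, out.sum())   # non-negative entries summing to 1.0
```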
def require_dataset(self, name, shape, dtype=None, exact=False, **kwargs):
"""Obtain an array, creating if it doesn't exist. Other `kwargs` are
as per :func:`zarr.hierarchy.Group.create_dataset`.
Parameters
----------
name : string
Array name.
shape : int or tuple of ints
Array shape.
dtype : string or dtype, optional
NumPy dtype.
exact : bool, optional
If True, require `dtype` to match exactly. If false, require
`dtype` can be cast from array dtype.
"""
return self._write_op(self._require_dataset_nosync, name, shape=shape,
dtype=dtype, exact=exact, **kwargs) | Obtain an array, creating if it doesn't exist. Other `kwargs` are
as per :func:`zarr.hierarchy.Group.create_dataset`.
Parameters
----------
name : string
Array name.
shape : int or tuple of ints
Array shape.
dtype : string or dtype, optional
NumPy dtype.
exact : bool, optional
If True, require `dtype` to match exactly. If false, require
`dtype` can be cast from array dtype. | Below is the instruction that describes the task:
### Input:
Obtain an array, creating if it doesn't exist. Other `kwargs` are
as per :func:`zarr.hierarchy.Group.create_dataset`.
Parameters
----------
name : string
Array name.
shape : int or tuple of ints
Array shape.
dtype : string or dtype, optional
NumPy dtype.
exact : bool, optional
If True, require `dtype` to match exactly. If false, require
`dtype` can be cast from array dtype.
### Response:
def require_dataset(self, name, shape, dtype=None, exact=False, **kwargs):
"""Obtain an array, creating if it doesn't exist. Other `kwargs` are
as per :func:`zarr.hierarchy.Group.create_dataset`.
Parameters
----------
name : string
Array name.
shape : int or tuple of ints
Array shape.
dtype : string or dtype, optional
NumPy dtype.
exact : bool, optional
If True, require `dtype` to match exactly. If false, require
`dtype` can be cast from array dtype.
"""
return self._write_op(self._require_dataset_nosync, name, shape=shape,
dtype=dtype, exact=exact, **kwargs) |
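A runnable example against an in-memory zarr group: a second call with a compatible shape and dtype hands back the existing array instead of creating a new one.

```python
# Runnable example (requires the zarr package); uses an in-memory store.
import numpy as np
import zarr

root = zarr.group()
a = root.require_dataset('data', shape=(100,), chunks=(10,), dtype='f4')
a[:] = np.arange(100, dtype='f4')
# Compatible shape/dtype: the existing array is returned, nothing is created.
b = root.require_dataset('data', shape=(100,), dtype='f4')
print(b[:5])
```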
def append(self, event, force = False):
'''
Append an event to queue. The events are classified and appended to sub-queues
:param event: input event
:param force: if True, the event is appended even if the queue is full
:returns: None if appended successfully, or a matcher to match a QueueCanWriteEvent otherwise
'''
if self.tree is None:
if self.parent is None:
raise IndexError('The queue is removed')
else:
return self.parent.parent.append(event, force)
q = self.tree.matchfirst(event)
return q.append(event, force) | Append an event to queue. The events are classified and appended to sub-queues
:param event: input event
:param force: if True, the event is appended even if the queue is full
:returns: None if appended successfully, or a matcher to match a QueueCanWriteEvent otherwise | Below is the instruction that describes the task:
### Input:
Append an event to queue. The events are classified and appended to sub-queues
:param event: input event
:param force: if True, the event is appended even if the queue is full
:returns: None if appended successfully, or a matcher to match a QueueCanWriteEvent otherwise
### Response:
def append(self, event, force = False):
'''
Append an event to queue. The events are classified and appended to sub-queues
:param event: input event
:param force: if True, the event is appended even if the queue is full
:returns: None if appended successfully, or a matcher to match a QueueCanWriteEvent otherwise
'''
if self.tree is None:
if self.parent is None:
raise IndexError('The queue is removed')
else:
return self.parent.parent.append(event, force)
q = self.tree.matchfirst(event)
return q.append(event, force) |
def get_first_name_last_name(self):
"""
:rtype: str
"""
names = []
if self._get_first_names():
names += self._get_first_names()
if self._get_additional_names():
names += self._get_additional_names()
if self._get_last_names():
names += self._get_last_names()
if names:
return helpers.list_to_string(names, " ")
else:
return self.get_full_name() | :rtype: str | Below is the instruction that describes the task:
### Input:
:rtype: str
### Response:
def get_first_name_last_name(self):
"""
:rtype: str
"""
names = []
if self._get_first_names():
names += self._get_first_names()
if self._get_additional_names():
names += self._get_additional_names()
if self._get_last_names():
names += self._get_last_names()
if names:
return helpers.list_to_string(names, " ")
else:
return self.get_full_name() |
def softplus(X):
""" Pass X through a soft-plus function, , in a numerically
stable way (using the log-sum-exp trick).
The softplus transformation is:
.. math::
\log(1 + \exp\{X\})
Parameters
----------
X: ndarray
shape (N,) array or shape (N, D) array of data.
Returns
-------
spX: ndarray
array of the same shape as X with the result of softplus(X).
"""
if np.isscalar(X):
return logsumexp(np.vstack((np.zeros(1), [X])).T, axis=1)[0]
N = X.shape[0]
if X.ndim == 1:
return logsumexp(np.vstack((np.zeros(N), X)).T, axis=1)
elif X.ndim == 2:
sftX = np.empty(X.shape, dtype=float)
for d in range(X.shape[1]):
sftX[:, d] = logsumexp(np.vstack((np.zeros(N), X[:, d])).T, axis=1)
return sftX
else:
raise ValueError("This only works on up to 2D arrays.") | Pass X through a soft-plus function, , in a numerically
stable way (using the log-sum-exp trick).
The softplus transformation is:
.. math::
\log(1 + \exp\{X\})
Parameters
----------
X: ndarray
shape (N,) array or shape (N, D) array of data.
Returns
-------
spX: ndarray
array of the same shape as X with the result of softplus(X). | Below is the the instruction that describes the task:
### Input:
Pass X through a soft-plus function in a numerically
stable way (using the log-sum-exp trick).
The softplus transformation is:
.. math::
\log(1 + \exp\{X\})
Parameters
----------
X: ndarray
shape (N,) array or shape (N, D) array of data.
Returns
-------
spX: ndarray
array of the same shape as X with the result of softplus(X).
### Response:
def softplus(X):
""" Pass X through a soft-plus function, , in a numerically
stable way (using the log-sum-exp trick).
The softplus transformation is:
.. math::
\log(1 + \exp\{X\})
Parameters
----------
X: ndarray
shape (N,) array or shape (N, D) array of data.
Returns
-------
spX: ndarray
array of the same shape as X with the result of softplus(X).
"""
if np.isscalar(X):
return logsumexp(np.vstack((np.zeros(1), [X])).T, axis=1)[0]
N = X.shape[0]
if X.ndim == 1:
return logsumexp(np.vstack((np.zeros(N), X)).T, axis=1)
elif X.ndim == 2:
sftX = np.empty(X.shape, dtype=float)
for d in range(X.shape[1]):
sftX[:, d] = logsumexp(np.vstack((np.zeros(N), X[:, d])).T, axis=1)
return sftX
else:
raise ValueError("This only works on up to 2D arrays.") |
def fix_config(self, options):
"""
Fixes the options, if necessary. I.e., it adds all required elements to the dictionary.
:param options: the options to fix
:type options: dict
:return: the (potentially) fixed options
:rtype: dict
"""
options = super(FileSupplier, self).fix_config(options)
opt = "files"
if opt not in options:
options[opt] = []
if opt not in self.help:
self.help[opt] = "The files to output (list of string)."
return options | Fixes the options, if necessary. I.e., it adds all required elements to the dictionary.
:param options: the options to fix
:type options: dict
:return: the (potentially) fixed options
:rtype: dict | Below is the the instruction that describes the task:
### Input:
Fixes the options, if necessary. I.e., it adds all required elements to the dictionary.
:param options: the options to fix
:type options: dict
:return: the (potentially) fixed options
:rtype: dict
### Response:
def fix_config(self, options):
"""
Fixes the options, if necessary. I.e., it adds all required elements to the dictionary.
:param options: the options to fix
:type options: dict
:return: the (potentially) fixed options
:rtype: dict
"""
options = super(FileSupplier, self).fix_config(options)
opt = "files"
if opt not in options:
options[opt] = []
if opt not in self.help:
self.help[opt] = "The files to output (list of string)."
return options |
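The fix_config entry above simply back-fills missing option and help entries; a standalone sketch of the same defaults-filling pattern, with the option name and help text reused only as an example:
def fill_defaults(options, help_index):
    options = dict(options)          # avoid mutating the caller's dict
    options.setdefault("files", [])  # same effect as the membership check above
    help_index.setdefault("files", "The files to output (list of string).")
    return options

help_index = {}
print(fill_defaults({}, help_index))  # {'files': []}
print(help_index)                     # {'files': 'The files to output (list of string).'}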
def save(self, commit=True):
""" Saves the instance. """
if not self.instance.pk:
# First, handle topic creation
if 'topic_type' in self.cleaned_data and len(self.cleaned_data['topic_type']):
topic_type = self.cleaned_data['topic_type']
else:
topic_type = Topic.TOPIC_POST
topic = Topic(
forum=self.forum,
subject=self.cleaned_data['subject'], # The topic's name is the post's name
type=topic_type,
status=Topic.TOPIC_UNLOCKED,
approved=self.perm_handler.can_post_without_approval(self.forum, self.user),
)
if not self.user.is_anonymous:
topic.poster = self.user
self.topic = topic
if commit:
topic.save()
else:
if 'topic_type' in self.cleaned_data and len(self.cleaned_data['topic_type']):
if self.instance.topic.type != self.cleaned_data['topic_type']:
self.instance.topic.type = self.cleaned_data['topic_type']
self.instance.topic._simple_save()
return super().save(commit) | Saves the instance. | Below is the the instruction that describes the task:
### Input:
Saves the instance.
### Response:
def save(self, commit=True):
""" Saves the instance. """
if not self.instance.pk:
# First, handle topic creation
if 'topic_type' in self.cleaned_data and len(self.cleaned_data['topic_type']):
topic_type = self.cleaned_data['topic_type']
else:
topic_type = Topic.TOPIC_POST
topic = Topic(
forum=self.forum,
subject=self.cleaned_data['subject'], # The topic's name is the post's name
type=topic_type,
status=Topic.TOPIC_UNLOCKED,
approved=self.perm_handler.can_post_without_approval(self.forum, self.user),
)
if not self.user.is_anonymous:
topic.poster = self.user
self.topic = topic
if commit:
topic.save()
else:
if 'topic_type' in self.cleaned_data and len(self.cleaned_data['topic_type']):
if self.instance.topic.type != self.cleaned_data['topic_type']:
self.instance.topic.type = self.cleaned_data['topic_type']
self.instance.topic._simple_save()
return super().save(commit) |
async def node(self, node, *, dc=None, watch=None, consistency=None):
"""Returns the health info of a node.
Parameters:
node (ObjectID): Node ID
dc (str): Specify datacenter that will be used.
Defaults to the agent's local datacenter.
watch (Blocking): Do a blocking query
consistency (Consistency): Force consistency
Returns:
CollectionMeta: where value is a list of checks
returns the checks specific to a node.
It returns a body like this::
[
{
"Node": "foobar",
"CheckID": "serfHealth",
"Name": "Serf Health Status",
"Status": "passing",
"Notes": "",
"Output": "",
"ServiceID": "",
"ServiceName": ""
},
{
"Node": "foobar",
"CheckID": "service:redis",
"Name": "Service 'redis' check",
"Status": "passing",
"Notes": "",
"Output": "",
"ServiceID": "redis",
"ServiceName": "redis"
}
]
In this case, we can see there is a system level check (that is, a
check with no associated ``ServiceID``) as well as a service check for
Redis. The "serfHealth" check is special in that it is automatically
present on every node. When a node joins the Consul cluster, it is
part of a distributed failure detection provided by Serf. If a node
fails, it is detected and the status is automatically changed to
``critical``.
"""
node_id = extract_attr(node, keys=["Node", "ID"])
params = {"dc": dc}
response = await self._api.get("/v1/health/node", node_id,
params=params,
watch=watch,
consistency=consistency)
return consul(response) | Returns the health info of a node.
Parameters:
node (ObjectID): Node ID
dc (str): Specify datacenter that will be used.
Defaults to the agent's local datacenter.
watch (Blocking): Do a blocking query
consistency (Consistency): Force consistency
Returns:
CollectionMeta: where value is a list of checks
returns the checks specific to a node.
It returns a body like this::
[
{
"Node": "foobar",
"CheckID": "serfHealth",
"Name": "Serf Health Status",
"Status": "passing",
"Notes": "",
"Output": "",
"ServiceID": "",
"ServiceName": ""
},
{
"Node": "foobar",
"CheckID": "service:redis",
"Name": "Service 'redis' check",
"Status": "passing",
"Notes": "",
"Output": "",
"ServiceID": "redis",
"ServiceName": "redis"
}
]
In this case, we can see there is a system level check (that is, a
check with no associated ``ServiceID``) as well as a service check for
Redis. The "serfHealth" check is special in that it is automatically
present on every node. When a node joins the Consul cluster, it is
part of a distributed failure detection provided by Serf. If a node
fails, it is detected and the status is automatically changed to
``critical``. | Below is the the instruction that describes the task:
### Input:
Returns the health info of a node.
Parameters:
node (ObjectID): Node ID
dc (str): Specify datacenter that will be used.
Defaults to the agent's local datacenter.
watch (Blocking): Do a blocking query
consistency (Consistency): Force consistency
Returns:
CollectionMeta: where value is a list of checks
returns the checks specific to a node.
It returns a body like this::
[
{
"Node": "foobar",
"CheckID": "serfHealth",
"Name": "Serf Health Status",
"Status": "passing",
"Notes": "",
"Output": "",
"ServiceID": "",
"ServiceName": ""
},
{
"Node": "foobar",
"CheckID": "service:redis",
"Name": "Service 'redis' check",
"Status": "passing",
"Notes": "",
"Output": "",
"ServiceID": "redis",
"ServiceName": "redis"
}
]
In this case, we can see there is a system level check (that is, a
check with no associated ``ServiceID``) as well as a service check for
Redis. The "serfHealth" check is special in that it is automatically
present on every node. When a node joins the Consul cluster, it is
part of a distributed failure detection provided by Serf. If a node
fails, it is detected and the status is automatically changed to
``critical``.
### Response:
async def node(self, node, *, dc=None, watch=None, consistency=None):
"""Returns the health info of a node.
Parameters:
node (ObjectID): Node ID
dc (str): Specify datacenter that will be used.
Defaults to the agent's local datacenter.
watch (Blocking): Do a blocking query
consistency (Consistency): Force consistency
Returns:
CollectionMeta: where value is a list of checks
returns the checks specific to a node.
It returns a body like this::
[
{
"Node": "foobar",
"CheckID": "serfHealth",
"Name": "Serf Health Status",
"Status": "passing",
"Notes": "",
"Output": "",
"ServiceID": "",
"ServiceName": ""
},
{
"Node": "foobar",
"CheckID": "service:redis",
"Name": "Service 'redis' check",
"Status": "passing",
"Notes": "",
"Output": "",
"ServiceID": "redis",
"ServiceName": "redis"
}
]
In this case, we can see there is a system level check (that is, a
check with no associated ``ServiceID``) as well as a service check for
Redis. The "serfHealth" check is special in that it is automatically
present on every node. When a node joins the Consul cluster, it is
part of a distributed failure detection provided by Serf. If a node
fails, it is detected and the status is automatically changed to
``critical``.
"""
node_id = extract_attr(node, keys=["Node", "ID"])
params = {"dc": dc}
response = await self._api.get("/v1/health/node", node_id,
params=params,
watch=watch,
consistency=consistency)
return consul(response) |
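The coroutine above wraps Consul's documented HTTP health endpoint; a hedged synchronous sketch querying that endpoint directly with requests, assuming a local agent on the default port 8500 and a node named "foobar":
import requests

resp = requests.get(
    "http://127.0.0.1:8500/v1/health/node/foobar",
    params={"dc": "dc1"},  # optional datacenter, as in the wrapper above
)
resp.raise_for_status()
for check in resp.json():
    print(check["CheckID"], check["Status"])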
def documents(self, full=False):
'''Return list of documents owned by user.
If `full=True`, it'll download all pages returned by the HTTP server'''
url = self.base_url + self.DOCUMENTS_PAGE
class_ = Document
results = self._retrieve_resources(url, class_, full)
return results | Return list of documents owned by user.
If `full=True`, it'll download all pages returned by the HTTP server | Below is the the instruction that describes the task:
### Input:
Return list of documents owned by user.
If `full=True`, it'll download all pages returned by the HTTP server
### Response:
def documents(self, full=False):
'''Return list of documents owned by user.
If `full=True`, it'll download all pages returned by the HTTP server'''
url = self.base_url + self.DOCUMENTS_PAGE
class_ = Document
results = self._retrieve_resources(url, class_, full)
return results |
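A generic illustration of what full=True implies, i.e. following the server's pagination links until they run out; the "results"/"next" field names are assumptions for the sketch, not this library's actual response format:
import requests

def fetch_all_pages(url):
    session = requests.Session()
    items = []
    while url:
        page = session.get(url).json()
        items.extend(page.get("results", []))
        url = page.get("next")  # None on the last page ends the loop
    return items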
def _get_string_and_set_width(self, combination, mode):
"""
Construct the string to be displayed and record the max width.
"""
show = "{}".format(self._separator(mode)).join(combination)
show = show.rstrip("{}".format(self._separator(mode)))
self.max_width = max([self.max_width, len(show)])
return show | Construct the string to be displayed and record the max width. | Below is the the instruction that describes the task:
### Input:
Construct the string to be displayed and record the max width.
### Response:
def _get_string_and_set_width(self, combination, mode):
"""
Construct the string to be displayed and record the max width.
"""
show = "{}".format(self._separator(mode)).join(combination)
show = show.rstrip("{}".format(self._separator(mode)))
self.max_width = max([self.max_width, len(show)])
return show |
def get(self, **params):
"""Performs get request to the biomart service.
Args:
**params (dict of str: any): Arbitrary keyword arguments, which
are added as parameters to the get request to biomart.
Returns:
requests.models.Response: Response from biomart for the request.
"""
if self._use_cache:
r = requests.get(self.url, params=params)
else:
with requests_cache.disabled():
r = requests.get(self.url, params=params)
r.raise_for_status()
return r | Performs get request to the biomart service.
Args:
**params (dict of str: any): Arbitrary keyword arguments, which
are added as parameters to the get request to biomart.
Returns:
requests.models.Response: Response from biomart for the request. | Below is the the instruction that describes the task:
### Input:
Performs get request to the biomart service.
Args:
**params (dict of str: any): Arbitrary keyword arguments, which
are added as parameters to the get request to biomart.
Returns:
requests.models.Response: Response from biomart for the request.
### Response:
def get(self, **params):
"""Performs get request to the biomart service.
Args:
**params (dict of str: any): Arbitrary keyword arguments, which
are added as parameters to the get request to biomart.
Returns:
requests.models.Response: Response from biomart for the request.
"""
if self._use_cache:
r = requests.get(self.url, params=params)
else:
with requests_cache.disabled():
r = requests.get(self.url, params=params)
r.raise_for_status()
return r |
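A sketch of the enable/bypass pattern used above, assuming the requests-cache package is installed; the cache name and URL are placeholders:
import requests
import requests_cache

requests_cache.install_cache("demo_cache")      # transparent caching for requests.get
cached = requests.get("https://httpbin.org/get")
with requests_cache.disabled():                 # bypass the cache for one call
    fresh = requests.get("https://httpbin.org/get")
cached.raise_for_status()
fresh.raise_for_status()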
def logp_gradient_of_set(variable_set, calculation_set=None):
"""
Calculates the gradient of the joint log posterior with respect to all the variables in variable_set.
Calculation of the log posterior is restricted to the variables in calculation_set.
Returns a dictionary of the gradients.
"""
logp_gradients = {}
for variable in variable_set:
logp_gradients[variable] = logp_gradient(variable, calculation_set)
return logp_gradients | Calculates the gradient of the joint log posterior with respect to all the variables in variable_set.
Calculation of the log posterior is restricted to the variables in calculation_set.
Returns a dictionary of the gradients. | Below is the the instruction that describes the task:
### Input:
Calculates the gradient of the joint log posterior with respect to all the variables in variable_set.
Calculation of the log posterior is restricted to the variables in calculation_set.
Returns a dictionary of the gradients.
### Response:
def logp_gradient_of_set(variable_set, calculation_set=None):
"""
Calculates the gradient of the joint log posterior with respect to all the variables in variable_set.
Calculation of the log posterior is restricted to the variables in calculation_set.
Returns a dictionary of the gradients.
"""
logp_gradients = {}
for variable in variable_set:
logp_gradients[variable] = logp_gradient(variable, calculation_set)
return logp_gradients |
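A generic illustration of returning one gradient per variable as a dictionary, using central finite differences on a toy joint log-density rather than PyMC's own logp_gradient machinery:
import numpy as np

def grad_dict(logp, values, eps=1e-6):
    grads = {}
    for name in values:
        up = dict(values); up[name] = values[name] + eps
        dn = dict(values); dn[name] = values[name] - eps
        grads[name] = (logp(up) - logp(dn)) / (2 * eps)
    return grads

logp = lambda v: -0.5 * (v["mu"] ** 2 + v["tau"] ** 2)   # toy joint log-density
print(grad_dict(logp, {"mu": 1.0, "tau": 2.0}))          # {'mu': ~-1.0, 'tau': ~-2.0}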
def delete_orderrun(backend, orderrun_id):
"""
Delete the orderrun specified by the argument.
"""
click.secho('%s - Deleting orderrun %s' % (get_datetime(), orderrun_id), fg='green')
check_and_print(DKCloudCommandRunner.delete_orderrun(backend.dki, orderrun_id.strip())) | Delete the orderrun specified by the argument. | Below is the the instruction that describes the task:
### Input:
Delete the orderrun specified by the argument.
### Response:
def delete_orderrun(backend, orderrun_id):
"""
Delete the orderrun specified by the argument.
"""
click.secho('%s - Deleting orderrun %s' % (get_datetime(), orderrun_id), fg='green')
check_and_print(DKCloudCommandRunner.delete_orderrun(backend.dki, orderrun_id.strip())) |
def _init_using_k_equivalence(self, given_graph, sfa=False):
"""
Args:
given_graph (DFA): The DFA states
sfa (boolean): A boolean for choosing SFA
Return:
list, list, list: sm_vector, smi_vector, em_vector initialization vectors
"""
graph = DFA(self.alphabet)
graph.init_from_acceptor(given_graph)
graph.fixminimized(self.alphabet)
# Access Strings
self.access_strings_map = self._bfs_path_states(graph, sorted(
graph.states, key=attrgetter('initial'), reverse=True)[0])
# Find Q
set_q = set(self._object_set_to_state_list(graph.states))
# We will work with states addresses here instead of states stateid for
# more convenience
set_f = set(self._object_set_to_state_list(self._get_accepted(graph)))
# Perform P := {F, Q-F}
set_nf = set_q.copy() - set_f.copy()
self.groups = [set_f.copy(), set_nf.copy()]
self.bookeeping = [(set_f, set_nf, '')]
done = False
while not done:
done = True
new_groups = []
for selectgroup in self.groups:
# _check for each letter if it splits the current group
for character in self.alphabet:
# print 'Testing symbol: ', c
target = defaultdict(list)
target_states = defaultdict(int)
new_g = [set(selectgroup)]
for sid in selectgroup:
# _check if all transitions using c are going in a state
# in the same group. If they are going on a different
# group then split
deststate = self._delta(graph, graph[sid], character)
destgroup = self._get_group_from_state(
deststate.stateid)
target[destgroup].append(sid)
target_states[destgroup] = deststate.stateid
if len(target) > 1:
inv_target_states = {
v: k for k, v in target_states.iteritems()}
new_g = [set(selectedstate) for selectedstate in target.values()]
done = False
# Get all the partitions of destgroups
queue = [set([x for x in target_states.values()])]
while queue:
top = queue.pop(0)
(group1, group2, distinguish_string) = self._partition_group(top)
ng1 = self._reverse_to_source(
target, [inv_target_states[x] for x in group1])
ng2 = self._reverse_to_source(
target, [inv_target_states[x] for x in group2])
dist_string = character + distinguish_string
self.bookeeping.append((ng1, ng2, dist_string))
if len(group1) > 1:
queue.append(group1)
if len(group2) > 1:
queue.append(group2)
break
new_groups += new_g
# End of iteration for the k-equivalence
# Assign new groups and check if any change occurred
self.groups = new_groups
sm_vector = [
i for (a, i) in sorted(
self.access_strings_map.items(),
key=lambda x: len(x[1]))]
if not sfa:
smi_vector = ['{}{}'.format(a, b)
for b in self.alphabet for a in sm_vector]
else:
smi_vector = self._init_smi(graph, self.access_strings_map)
em_vector = [distinguish_string for (_, _, distinguish_string) in self.bookeeping]
return sm_vector, smi_vector, em_vector | Args:
given_graph (DFA): The DFA states
sfa (boolean): A boolean for choosing SFA
Return:
list, list, list: sm_vector, smi_vector, em_vector initialization vectors | Below is the the instruction that describes the task:
### Input:
Args:
given_graph (DFA): The DFA states
sfa (boolean): A boolean for chosing SFA
Return:
list, list, list: sm_vector, smi_vector, em_vector initialization vectors
### Response:
def _init_using_k_equivalence(self, given_graph, sfa=False):
"""
Args:
given_graph (DFA): The DFA states
sfa (boolean): A boolean for choosing SFA
Return:
list, list, list: sm_vector, smi_vector, em_vector initialization vectors
"""
graph = DFA(self.alphabet)
graph.init_from_acceptor(given_graph)
graph.fixminimized(self.alphabet)
# Access Strings
self.access_strings_map = self._bfs_path_states(graph, sorted(
graph.states, key=attrgetter('initial'), reverse=True)[0])
# Find Q
set_q = set(self._object_set_to_state_list(graph.states))
# We will work with states addresses here instead of states stateid for
# more convenience
set_f = set(self._object_set_to_state_list(self._get_accepted(graph)))
# Perform P := {F, Q-F}
set_nf = set_q.copy() - set_f.copy()
self.groups = [set_f.copy(), set_nf.copy()]
self.bookeeping = [(set_f, set_nf, '')]
done = False
while not done:
done = True
new_groups = []
for selectgroup in self.groups:
# _check for each letter if it splits the current group
for character in self.alphabet:
# print 'Testing symbol: ', c
target = defaultdict(list)
target_states = defaultdict(int)
new_g = [set(selectgroup)]
for sid in selectgroup:
# _check if all transitions using c are going in a state
# in the same group. If they are going on a different
# group then split
deststate = self._delta(graph, graph[sid], character)
destgroup = self._get_group_from_state(
deststate.stateid)
target[destgroup].append(sid)
target_states[destgroup] = deststate.stateid
if len(target) > 1:
inv_target_states = {
v: k for k, v in target_states.iteritems()}
new_g = [set(selectedstate) for selectedstate in target.values()]
done = False
# Get all the partitions of destgroups
queue = [set([x for x in target_states.values()])]
while queue:
top = queue.pop(0)
(group1, group2, distinguish_string) = self._partition_group(top)
ng1 = self._reverse_to_source(
target, [inv_target_states[x] for x in group1])
ng2 = self._reverse_to_source(
target, [inv_target_states[x] for x in group2])
dist_string = character + distinguish_string
self.bookeeping.append((ng1, ng2, dist_string))
if len(group1) > 1:
queue.append(group1)
if len(group2) > 1:
queue.append(group2)
break
new_groups += new_g
# End of iteration for the k-equivalence
# Assign new groups and check if any change occurred
self.groups = new_groups
sm_vector = [
i for (a, i) in sorted(
self.access_strings_map.items(),
key=lambda x: len(x[1]))]
if not sfa:
smi_vector = ['{}{}'.format(a, b)
for b in self.alphabet for a in sm_vector]
else:
smi_vector = self._init_smi(graph, self.access_strings_map)
em_vector = [distinguish_string for (_, _, distinguish_string) in self.bookeeping]
return sm_vector, smi_vector, em_vector |
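The method above interleaves access-string and distinguishing-string bookkeeping with the splitting loop; a self-contained sketch of just the k-equivalence splitting (Moore-style partition refinement) on a dict-encoded DFA, which computes the final groups only:
def moore_partition(states, alphabet, delta, accepting):
    groups = [set(accepting), set(states) - set(accepting)]
    groups = [g for g in groups if g]
    changed = True
    while changed:
        changed = False
        new_groups = []
        for g in groups:
            # split g by the tuple of destination groups under every symbol
            buckets = {}
            for s in g:
                key = tuple(
                    next(i for i, h in enumerate(groups) if delta[s][a] in h)
                    for a in alphabet
                )
                buckets.setdefault(key, set()).add(s)
            new_groups.extend(buckets.values())
            if len(buckets) > 1:
                changed = True
        groups = new_groups
    return groups

# Example: state 1 accepts; states 0 and 2 behave identically and end up in one group.
delta = {0: {"a": 1}, 1: {"a": 0}, 2: {"a": 1}}
print(moore_partition([0, 1, 2], ["a"], delta, accepting=[1]))   # [{1}, {0, 2}]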
def get_thumbnails(self, size=None):
""" Returns this Item Thumbnails. Thumbnails are not supported on
SharePoint Server 2016.
:param size: request only the specified size: e.g. "small",
Custom 300x400 px: "c300x400", Crop: "c300x400_Crop"
:return: Thumbnail Data
:rtype: dict
"""
if not self.object_id:
return []
url = self.build_url(
self._endpoints.get('thumbnails').format(id=self.object_id))
params = {}
if size is not None:
params['select'] = size
response = self.con.get(url, params=params)
if not response:
return []
data = response.json()
if not self.thumbnails or size is None:
self.thumbnails = data
return data | Returns this Item Thumbnails. Thumbnails are not supported on
SharePoint Server 2016.
:param size: request only the specified size: e.g. "small",
Custom 300x400 px: "c300x400", Crop: "c300x400_Crop"
:return: Thumbnail Data
:rtype: dict | Below is the the instruction that describes the task:
### Input:
Returns this Item Thumbnails. Thumbnails are not supported on
SharePoint Server 2016.
:param size: request only the specified size: e.g. "small",
Custom 300x400 px: "c300x400", Crop: "c300x400_Crop"
:return: Thumbnail Data
:rtype: dict
### Response:
def get_thumbnails(self, size=None):
""" Returns this Item Thumbnails. Thumbnails are not supported on
SharePoint Server 2016.
:param size: request only the specified size: e.g. "small",
Custom 300x400 px: "c300x400", Crop: "c300x400_Crop"
:return: Thumbnail Data
:rtype: dict
"""
if not self.object_id:
return []
url = self.build_url(
self._endpoints.get('thumbnails').format(id=self.object_id))
params = {}
if size is not None:
params['select'] = size
response = self.con.get(url, params=params)
if not response:
return []
data = response.json()
if not self.thumbnails or size is None:
self.thumbnails = data
return data |
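The wrapper above ultimately calls the drive item thumbnails endpoint; a hedged sketch against Microsoft Graph directly, where ACCESS_TOKEN and ITEM_ID are placeholders you must supply yourself:
import requests

ACCESS_TOKEN = "..."   # hypothetical OAuth bearer token
ITEM_ID = "..."        # hypothetical drive item id

resp = requests.get(
    f"https://graph.microsoft.com/v1.0/me/drive/items/{ITEM_ID}/thumbnails",
    headers={"Authorization": f"Bearer {ACCESS_TOKEN}"},
    params={"$select": "small"},   # analogous to the size argument above
)
resp.raise_for_status()
print(resp.json())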
def _check_configuration(self, *attrs):
"""Check that each named attr has been configured
"""
for attr in attrs:
if getattr(self, attr, None) is None:
raise ConfigurationError("{} not configured".format(attr)) | Check that each named attr has been configured | Below is the the instruction that describes the task:
### Input:
Check that each named attr has been configured
### Response:
def _check_configuration(self, *attrs):
"""Check that each named attr has been configured
"""
for attr in attrs:
if getattr(self, attr, None) is None:
raise ConfigurationError("{} not configured".format(attr)) |
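A standalone sketch of the same guard pattern; the class, attribute names, and exception type here are made up for the example:
class Client:
    def __init__(self, base_url=None, token=None):
        self.base_url = base_url
        self.token = token

    def _check_configuration(self, *attrs):
        for attr in attrs:
            if getattr(self, attr, None) is None:
                raise RuntimeError("{} not configured".format(attr))

    def request(self):
        self._check_configuration("base_url", "token")
        # ... perform the actual request here ...

Client(base_url="https://example.org").request()  # raises RuntimeError: token not configured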