code (string, 75–104k chars) | docstring (string, 1–46.9k chars) | text (string, 164–112k chars) |
---|---|---|
def create(att_name: str, parsed_att: S, attribute_type: Type[T], caught_exec: Dict[Converter[S, T], Exception]):
"""
Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests
https://github.com/nose-devs/nose/issues/725
:param att_name:
:param parsed_att:
:param attribute_type:
:param caught_exec:
:return:
"""
base_msg = "Error while trying to convert value for attribute '{a}' to type <{t}>:\n" \
" - parsed value is : '{v}' of type <{tv}>\n" \
"".format(a=str(att_name), t=get_pretty_type_str(attribute_type), v=parsed_att,
tv=get_pretty_type_str(type(parsed_att)))
msg = StringIO()
if len(list(caught_exec.keys())) > 0:
msg.writelines(' - converters tried are : \n * ')
msg.writelines('\n * '.join([str(converter) for converter in caught_exec.keys()]))
msg.writelines(' \n Caught the following exceptions: \n')
for converter, err in caught_exec.items():
msg.writelines('--------------- From ' + str(converter) + ' caught: \n')
print_error_to_io_stream(err, msg)
msg.write('\n')
return AttrConversionException(base_msg + msg.getvalue()) | Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests
https://github.com/nose-devs/nose/issues/725
:param att_name:
:param parsed_att:
:param attribute_type:
:param caught_exec:
:return: | Below is the instruction that describes the task:
### Input:
Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests
https://github.com/nose-devs/nose/issues/725
:param att_name:
:param parsed_att:
:param attribute_type:
:param caught_exec:
:return:
### Response:
def create(att_name: str, parsed_att: S, attribute_type: Type[T], caught_exec: Dict[Converter[S, T], Exception]):
"""
Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests
https://github.com/nose-devs/nose/issues/725
:param att_name:
:param parsed_att:
:param attribute_type:
:param caught_exec:
:return:
"""
base_msg = "Error while trying to convert value for attribute '{a}' to type <{t}>:\n" \
" - parsed value is : '{v}' of type <{tv}>\n" \
"".format(a=str(att_name), t=get_pretty_type_str(attribute_type), v=parsed_att,
tv=get_pretty_type_str(type(parsed_att)))
msg = StringIO()
if len(list(caught_exec.keys())) > 0:
msg.writelines(' - converters tried are : \n * ')
msg.writelines('\n * '.join([str(converter) for converter in caught_exec.keys()]))
msg.writelines(' \n Caught the following exceptions: \n')
for converter, err in caught_exec.items():
msg.writelines('--------------- From ' + str(converter) + ' caught: \n')
print_error_to_io_stream(err, msg)
msg.write('\n')
return AttrConversionException(base_msg + msg.getvalue()) |
def get_my_hostname(self, split_hostname_on_first_period=False):
"""
Returns a best guess for the hostname registered with OpenStack for this host
"""
hostname = self.init_config.get("os_host") or self.hostname
if split_hostname_on_first_period:
hostname = hostname.split('.')[0]
return hostname | Returns a best guess for the hostname registered with OpenStack for this host | Below is the instruction that describes the task:
### Input:
Returns a best guess for the hostname registered with OpenStack for this host
### Response:
def get_my_hostname(self, split_hostname_on_first_period=False):
"""
Returns a best guess for the hostname registered with OpenStack for this host
"""
hostname = self.init_config.get("os_host") or self.hostname
if split_hostname_on_first_period:
hostname = hostname.split('.')[0]
return hostname |
def get(self, event):
"""Get a stored configuration"""
try:
comp = event.data['uuid']
except KeyError:
comp = None
if not comp:
self.log('Invalid get request without schema or component',
lvl=error)
return
self.log("Config data get request for ", event.data, "from",
event.user)
component = model_factory(Schema).find_one({
'uuid': comp
})
response = {
'component': 'hfos.ui.configurator',
'action': 'get',
'data': component.serializablefields()
}
self.fireEvent(send(event.client.uuid, response)) | Get a stored configuration | Below is the instruction that describes the task:
### Input:
Get a stored configuration
### Response:
def get(self, event):
"""Get a stored configuration"""
try:
comp = event.data['uuid']
except KeyError:
comp = None
if not comp:
self.log('Invalid get request without schema or component',
lvl=error)
return
self.log("Config data get request for ", event.data, "from",
event.user)
component = model_factory(Schema).find_one({
'uuid': comp
})
response = {
'component': 'hfos.ui.configurator',
'action': 'get',
'data': component.serializablefields()
}
self.fireEvent(send(event.client.uuid, response)) |
def plotFCM(data, channel_names, kind='histogram', ax=None,
autolabel=True, xlabel_kwargs={}, ylabel_kwargs={},
colorbar=False, grid=False,
**kwargs):
"""
Plots the sample on the current axis.
Follow with a call to matplotlibs show() in order to see the plot.
Parameters
----------
data : DataFrame
{graph_plotFCM_pars}
{common_plot_ax}
Returns
-------
The output of the plot command used
"""
if ax is None: ax = pl.gca()
xlabel_kwargs.setdefault('size', 16)
ylabel_kwargs.setdefault('size', 16)
channel_names = to_list(channel_names)
if len(channel_names) == 1:
# 1D so histogram plot
kwargs.setdefault('color', 'gray')
kwargs.setdefault('histtype', 'stepfilled')
kwargs.setdefault('bins', 200) # Do not move above
x = data[channel_names[0]].values
if len(x) >= 1:
if (len(x) == 1) and isinstance(kwargs['bins'], int):
# Only needed for hist (not hist2d) due to hist function doing
# excessive input checking
warnings.warn("One of the data sets only has a single event. "
"This event won't be plotted unless the bin locations"
" are explicitly provided to the plotting function. ")
return None
plot_output = ax.hist(x, **kwargs)
else:
return None
elif len(channel_names) == 2:
x = data[channel_names[0]].values # value of first channel
y = data[channel_names[1]].values # value of second channel
if len(x) == 0:
# Don't draw a plot if there's no data
return None
if kind == 'scatter':
kwargs.setdefault('edgecolor', 'none')
plot_output = ax.scatter(x, y, **kwargs)
elif kind == 'histogram':
kwargs.setdefault('bins', 200) # Do not move above
kwargs.setdefault('cmin', 1)
kwargs.setdefault('cmap', pl.cm.copper)
kwargs.setdefault('norm', matplotlib.colors.LogNorm())
plot_output = ax.hist2d(x, y, **kwargs)
mappable = plot_output[-1]
if colorbar:
pl.colorbar(mappable, ax=ax)
else:
raise ValueError("Not a valid plot type. Must be 'scatter', 'histogram'")
else:
raise ValueError('Received an unexpected number of channels: "{}"'.format(channel_names))
pl.grid(grid)
if autolabel:
y_label_text = 'Counts' if len(channel_names) == 1 else channel_names[1]
ax.set_xlabel(channel_names[0], **xlabel_kwargs)
ax.set_ylabel(y_label_text, **ylabel_kwargs)
return plot_output | Plots the sample on the current axis.
Follow with a call to matplotlibs show() in order to see the plot.
Parameters
----------
data : DataFrame
{graph_plotFCM_pars}
{common_plot_ax}
Returns
-------
The output of the plot command used | Below is the instruction that describes the task:
### Input:
Plots the sample on the current axis.
Follow with a call to matplotlibs show() in order to see the plot.
Parameters
----------
data : DataFrame
{graph_plotFCM_pars}
{common_plot_ax}
Returns
-------
The output of the plot command used
### Response:
def plotFCM(data, channel_names, kind='histogram', ax=None,
autolabel=True, xlabel_kwargs={}, ylabel_kwargs={},
colorbar=False, grid=False,
**kwargs):
"""
Plots the sample on the current axis.
Follow with a call to matplotlibs show() in order to see the plot.
Parameters
----------
data : DataFrame
{graph_plotFCM_pars}
{common_plot_ax}
Returns
-------
The output of the plot command used
"""
if ax is None: ax = pl.gca()
xlabel_kwargs.setdefault('size', 16)
ylabel_kwargs.setdefault('size', 16)
channel_names = to_list(channel_names)
if len(channel_names) == 1:
# 1D so histogram plot
kwargs.setdefault('color', 'gray')
kwargs.setdefault('histtype', 'stepfilled')
kwargs.setdefault('bins', 200) # Do not move above
x = data[channel_names[0]].values
if len(x) >= 1:
if (len(x) == 1) and isinstance(kwargs['bins'], int):
# Only needed for hist (not hist2d) due to hist function doing
# excessive input checking
warnings.warn("One of the data sets only has a single event. "
"This event won't be plotted unless the bin locations"
" are explicitly provided to the plotting function. ")
return None
plot_output = ax.hist(x, **kwargs)
else:
return None
elif len(channel_names) == 2:
x = data[channel_names[0]].values # value of first channel
y = data[channel_names[1]].values # value of second channel
if len(x) == 0:
# Don't draw a plot if there's no data
return None
if kind == 'scatter':
kwargs.setdefault('edgecolor', 'none')
plot_output = ax.scatter(x, y, **kwargs)
elif kind == 'histogram':
kwargs.setdefault('bins', 200) # Do not move above
kwargs.setdefault('cmin', 1)
kwargs.setdefault('cmap', pl.cm.copper)
kwargs.setdefault('norm', matplotlib.colors.LogNorm())
plot_output = ax.hist2d(x, y, **kwargs)
mappable = plot_output[-1]
if colorbar:
pl.colorbar(mappable, ax=ax)
else:
raise ValueError("Not a valid plot type. Must be 'scatter', 'histogram'")
else:
raise ValueError('Received an unexpected number of channels: "{}"'.format(channel_names))
pl.grid(grid)
if autolabel:
y_label_text = 'Counts' if len(channel_names) == 1 else channel_names[1]
ax.set_xlabel(channel_names[0], **xlabel_kwargs)
ax.set_ylabel(y_label_text, **ylabel_kwargs)
return plot_output |
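For orientation, here is a minimal usage sketch of plotFCM (not part of the source): the channel names and data are made up, and it assumes plotFCM, pandas and matplotlib are importable alongside the function above.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Hypothetical flow-cytometry-style data; 'FSC-A' and 'SSC-A' are assumed channel names.
rng = np.random.default_rng(0)
data = pd.DataFrame({'FSC-A': rng.normal(500, 50, 5000),
                     'SSC-A': rng.normal(300, 40, 5000)})
plotFCM(data, 'FSC-A')                              # 1D histogram of one channel
plt.figure()
plotFCM(data, ['FSC-A', 'SSC-A'], kind='scatter')   # 2D scatter of two channels
plt.show()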
def print_png(o):
"""
A function to display sympy expression using inline style LaTeX in PNG.
"""
s = latex(o, mode='inline')
# mathtext does not understand certain latex flags, so we try to replace
# them with suitable subs.
s = s.replace('\\operatorname','')
s = s.replace('\\overline', '\\bar')
png = latex_to_png(s)
return png | A function to display sympy expression using inline style LaTeX in PNG. | Below is the instruction that describes the task:
### Input:
A function to display sympy expression using inline style LaTeX in PNG.
### Response:
def print_png(o):
"""
A function to display sympy expression using inline style LaTeX in PNG.
"""
s = latex(o, mode='inline')
# mathtext does not understand certain latex flags, so we try to replace
# them with suitable subs.
s = s.replace('\\operatorname','')
s = s.replace('\\overline', '\\bar')
png = latex_to_png(s)
return png |
def from_sky(cls, distancelimit=15, magnitudelimit=18):
'''
Create a Constellation from a criteria search of the whole sky.
Parameters
----------
distancelimit : float
Maximum distance (parsecs).
magnitudelimit : float
Maximum magnitude (for Gaia G).
'''
# define a query for cone search surrounding this center
criteria = []
if distancelimit is not None:
criteria.append('parallax >= {}'.format(1000.0/distancelimit))
if magnitudelimit is not None:
criteria.append('phot_g_mean_mag <= {}'.format(magnitudelimit))
allskyquery = """{} WHERE {}""".format(cls.basequery, ' and '.join(criteria))
print(allskyquery)
# run the query
print('querying Gaia DR2, for distance<{} and G<{}'.format(distancelimit, magnitudelimit))
table = query(allskyquery)
# store the search parameters in this object
c = cls(cls.standardize_table(table))
c.standardized.meta['query'] = allskyquery
c.standardized.meta['magnitudelimit'] = magnitudelimit
c.standardized.meta['distancelimit'] = distancelimit
#c.distancelimit = distancelimit
#c.magnitudelimit = magnitudelimit or c.magnitudelimit
return c | Create a Constellation from a criteria search of the whole sky.
Parameters
----------
distancelimit : float
Maximum distance (parsecs).
magnitudelimit : float
Maximum magnitude (for Gaia G). | Below is the instruction that describes the task:
### Input:
Create a Constellation from a criteria search of the whole sky.
Parameters
----------
distancelimit : float
Maximum distance (parsecs).
magnitudelimit : float
Maximum magnitude (for Gaia G).
### Response:
def from_sky(cls, distancelimit=15, magnitudelimit=18):
'''
Create a Constellation from a criteria search of the whole sky.
Parameters
----------
distancelimit : float
Maximum distance (parsecs).
magnitudelimit : float
Maximum magnitude (for Gaia G).
'''
# define a query for cone search surrounding this center
criteria = []
if distancelimit is not None:
criteria.append('parallax >= {}'.format(1000.0/distancelimit))
if magnitudelimit is not None:
criteria.append('phot_g_mean_mag <= {}'.format(magnitudelimit))
allskyquery = """{} WHERE {}""".format(cls.basequery, ' and '.join(criteria))
print(allskyquery)
# run the query
print('querying Gaia DR2, for distance<{} and G<{}'.format(distancelimit, magnitudelimit))
table = query(allskyquery)
# store the search parameters in this object
c = cls(cls.standardize_table(table))
c.standardized.meta['query'] = allskyquery
c.standardized.meta['magnitudelimit'] = magnitudelimit
c.standardized.meta['distancelimit'] = distancelimit
#c.distancelimit = distancelimit
#c.magnitudelimit = magnitudelimit or c.magnitudelimit
return c |
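A one-line usage sketch (hypothetical limits; this runs a live Gaia DR2 query and assumes the enclosing Constellation class is importable):
# Every Gaia DR2 source within 10 pc and brighter than G = 16.
nearby = Constellation.from_sky(distancelimit=10, magnitudelimit=16)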
def analyzeParameters(expName, suite):
"""
Analyze the impact of each list parameter in this experiment
"""
print("\n================",expName,"=====================")
try:
expParams = suite.get_params(expName)
pprint.pprint(expParams)
for p in ["boost_strength", "k", "learning_rate", "weight_sparsity",
"k_inference_factor", "boost_strength_factor",
"c1_out_channels", "c1_k", "learning_rate_factor",
"batches_in_epoch",
]:
if p in expParams and type(expParams[p]) == list:
print("\n",p)
for v1 in expParams[p]:
# Retrieve the last totalCorrect from each experiment
# Print them sorted from best to worst
values, params = suite.get_values_fix_params(
expName, 0, "testerror", "last", **{p:v1})
v = np.array(values)
try:
print("Average/min/max for", p, v1, "=", v.mean(), v.min(), v.max())
# sortedIndices = v.argsort()
# for i in sortedIndices[::-1]:
# print(v[i],params[i]["name"])
except:
print("Can't compute stats for",p)
except:
print("Couldn't load experiment",expName) | Analyze the impact of each list parameter in this experiment | Below is the the instruction that describes the task:
### Input:
Analyze the impact of each list parameter in this experiment
### Response:
def analyzeParameters(expName, suite):
"""
Analyze the impact of each list parameter in this experiment
"""
print("\n================",expName,"=====================")
try:
expParams = suite.get_params(expName)
pprint.pprint(expParams)
for p in ["boost_strength", "k", "learning_rate", "weight_sparsity",
"k_inference_factor", "boost_strength_factor",
"c1_out_channels", "c1_k", "learning_rate_factor",
"batches_in_epoch",
]:
if p in expParams and type(expParams[p]) == list:
print("\n",p)
for v1 in expParams[p]:
# Retrieve the last totalCorrect from each experiment
# Print them sorted from best to worst
values, params = suite.get_values_fix_params(
expName, 0, "testerror", "last", **{p:v1})
v = np.array(values)
try:
print("Average/min/max for", p, v1, "=", v.mean(), v.min(), v.max())
# sortedIndices = v.argsort()
# for i in sortedIndices[::-1]:
# print(v[i],params[i]["name"])
except:
print("Can't compute stats for",p)
except:
print("Couldn't load experiment",expName) |
def uuid_from_kronos_time(time, _type=UUIDType.RANDOM):
"""
Generate a UUID with the specified time.
If `lowest` is true, return the lexicographically first UUID for the specified
time.
"""
return timeuuid_from_time(int(time) + UUID_TIME_OFFSET, type=_type) | Generate a UUID with the specified time.
If `lowest` is true, return the lexicographically first UUID for the specified
time. | Below is the instruction that describes the task:
### Input:
Generate a UUID with the specified time.
If `lowest` is true, return the lexicographically first UUID for the specified
time.
### Response:
def uuid_from_kronos_time(time, _type=UUIDType.RANDOM):
"""
Generate a UUID with the specified time.
If `lowest` is true, return the lexicographically first UUID for the specified
time.
"""
return timeuuid_from_time(int(time) + UUID_TIME_OFFSET, type=_type) |
def show_firmware_version_output_show_firmware_version_node_info_firmware_version_info_application_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
show_firmware_version = ET.Element("show_firmware_version")
config = show_firmware_version
output = ET.SubElement(show_firmware_version, "output")
show_firmware_version = ET.SubElement(output, "show-firmware-version")
node_info = ET.SubElement(show_firmware_version, "node-info")
firmware_version_info = ET.SubElement(node_info, "firmware-version-info")
application_name = ET.SubElement(firmware_version_info, "application-name")
application_name.text = kwargs.pop('application_name')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def show_firmware_version_output_show_firmware_version_node_info_firmware_version_info_application_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
show_firmware_version = ET.Element("show_firmware_version")
config = show_firmware_version
output = ET.SubElement(show_firmware_version, "output")
show_firmware_version = ET.SubElement(output, "show-firmware-version")
node_info = ET.SubElement(show_firmware_version, "node-info")
firmware_version_info = ET.SubElement(node_info, "firmware-version-info")
application_name = ET.SubElement(firmware_version_info, "application-name")
application_name.text = kwargs.pop('application_name')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def send(self, test=None):
'''
Send the email through the Postmark system.
Pass test=True to just print out the resulting
JSON message being sent to Postmark
'''
self._check_values()
# Set up message dictionary
json_message = self.to_json_message()
# if (self.__html_body and not self.__text_body) and self.__multipart:
# # TODO: Set up regex to strip html
# pass
# If test is not specified, attempt to read the Django setting
if test is None:
try:
from django.conf import settings as django_settings
test = getattr(django_settings, "POSTMARK_TEST_MODE", None)
except ImportError:
pass
# If this is a test, just print the message
if test:
print('JSON message is:\n%s' % json.dumps(json_message, cls=PMJSONEncoder))
return
if self.__template_id:
endpoint_url = __POSTMARK_URL__ + 'email/withTemplate/'
else:
endpoint_url = __POSTMARK_URL__ + 'email'
# Set up the url Request
req = Request(
endpoint_url,
json.dumps(json_message, cls=PMJSONEncoder).encode('utf8'),
{
'Accept': 'application/json',
'Content-Type': 'application/json',
'X-Postmark-Server-Token': self.__api_key,
'User-agent': self.__user_agent
}
)
# Attempt send
try:
# print 'sending request to postmark: %s' % json_message
result = urlopen(req)
jsontxt = result.read().decode('utf8')
result.close()
if result.code == 200:
self.message_id = json.loads(jsontxt).get('MessageID', None)
return True
else:
raise PMMailSendException('Return code %d: %s' % (result.code, result.msg))
except HTTPError as err:
if err.code == 401:
raise PMMailUnauthorizedException('Sending Unauthorized - incorrect API key.', err)
elif err.code == 422:
try:
jsontxt = err.read().decode('utf8')
jsonobj = json.loads(jsontxt)
desc = jsonobj['Message']
error_code = jsonobj['ErrorCode']
except KeyError:
raise PMMailUnprocessableEntityException('Unprocessable Entity: Description not given')
if error_code == 406:
raise PMMailInactiveRecipientException('You tried to send email to a recipient that has been marked as inactive.')
raise PMMailUnprocessableEntityException('Unprocessable Entity: %s' % desc)
elif err.code == 500:
raise PMMailServerErrorException('Internal server error at Postmark. Admins have been alerted.', err)
except URLError as err:
if hasattr(err, 'reason'):
raise PMMailURLException('URLError: Failed to reach the server: %s (See "inner_exception" for details)' % err.reason, err)
elif hasattr(err, 'code'):
raise PMMailURLException('URLError: %d: The server couldn\'t fulfill the request. (See "inner_exception" for details)' % err.code, err)
else:
raise PMMailURLException('URLError: The server couldn\'t fulfill the request. (See "inner_exception" for details)', err) | Send the email through the Postmark system.
Pass test=True to just print out the resulting
JSON message being sent to Postmark | Below is the instruction that describes the task:
### Input:
Send the email through the Postmark system.
Pass test=True to just print out the resulting
JSON message being sent to Postmark
### Response:
def send(self, test=None):
'''
Send the email through the Postmark system.
Pass test=True to just print out the resulting
JSON message being sent to Postmark
'''
self._check_values()
# Set up message dictionary
json_message = self.to_json_message()
# if (self.__html_body and not self.__text_body) and self.__multipart:
# # TODO: Set up regex to strip html
# pass
# If test is not specified, attempt to read the Django setting
if test is None:
try:
from django.conf import settings as django_settings
test = getattr(django_settings, "POSTMARK_TEST_MODE", None)
except ImportError:
pass
# If this is a test, just print the message
if test:
print('JSON message is:\n%s' % json.dumps(json_message, cls=PMJSONEncoder))
return
if self.__template_id:
endpoint_url = __POSTMARK_URL__ + 'email/withTemplate/'
else:
endpoint_url = __POSTMARK_URL__ + 'email'
# Set up the url Request
req = Request(
endpoint_url,
json.dumps(json_message, cls=PMJSONEncoder).encode('utf8'),
{
'Accept': 'application/json',
'Content-Type': 'application/json',
'X-Postmark-Server-Token': self.__api_key,
'User-agent': self.__user_agent
}
)
# Attempt send
try:
# print 'sending request to postmark: %s' % json_message
result = urlopen(req)
jsontxt = result.read().decode('utf8')
result.close()
if result.code == 200:
self.message_id = json.loads(jsontxt).get('MessageID', None)
return True
else:
raise PMMailSendException('Return code %d: %s' % (result.code, result.msg))
except HTTPError as err:
if err.code == 401:
raise PMMailUnauthorizedException('Sending Unauthorized - incorrect API key.', err)
elif err.code == 422:
try:
jsontxt = err.read().decode('utf8')
jsonobj = json.loads(jsontxt)
desc = jsonobj['Message']
error_code = jsonobj['ErrorCode']
except KeyError:
raise PMMailUnprocessableEntityException('Unprocessable Entity: Description not given')
if error_code == 406:
raise PMMailInactiveRecipientException('You tried to send email to a recipient that has been marked as inactive.')
raise PMMailUnprocessableEntityException('Unprocessable Entity: %s' % desc)
elif err.code == 500:
raise PMMailServerErrorException('Internal server error at Postmark. Admins have been alerted.', err)
except URLError as err:
if hasattr(err, 'reason'):
raise PMMailURLException('URLError: Failed to reach the server: %s (See "inner_exception" for details)' % err.reason, err)
elif hasattr(err, 'code'):
raise PMMailURLException('URLError: %d: The server couldn\'t fulfill the request. (See "inner_exception" for details)' % err.code, err)
else:
raise PMMailURLException('URLError: The server couldn\'t fulfill the request. (See "inner_exception" for details)', err) |
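A rough usage sketch, assuming the surrounding library exposes a PMMail class that accepts these keyword arguments (the key and addresses are placeholders); passing test=True only prints the JSON payload instead of calling the Postmark API:
message = PMMail(api_key='POSTMARK_API_TEST',      # placeholder key
                 sender='alerts@example.com',
                 to='user@example.com',
                 subject='Hello',
                 text_body='Plain-text body')
message.send(test=True)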
def _list_files(self, args):
'''
List files for an installed package
'''
if len(args) < 2:
raise SPMInvocationError('A package name must be specified')
package = args[-1]
files = self._pkgdb_fun('list_files', package, self.db_conn)
if files is None:
raise SPMPackageError('package {0} not installed'.format(package))
else:
for file_ in files:
if self.opts['verbose']:
status_msg = ','.join(file_)
else:
status_msg = file_[0]
self.ui.status(status_msg) | List files for an installed package | Below is the instruction that describes the task:
### Input:
List files for an installed package
### Response:
def _list_files(self, args):
'''
List files for an installed package
'''
if len(args) < 2:
raise SPMInvocationError('A package name must be specified')
package = args[-1]
files = self._pkgdb_fun('list_files', package, self.db_conn)
if files is None:
raise SPMPackageError('package {0} not installed'.format(package))
else:
for file_ in files:
if self.opts['verbose']:
status_msg = ','.join(file_)
else:
status_msg = file_[0]
self.ui.status(status_msg) |
def to_json(df, x, y, timeseries=False):
"""Format output for json response."""
values = {k: [] for k in y}
for i, row in df.iterrows():
for yy in y:
values[yy].append({
"x": row[x],
"y": row[yy]
})
return {"result": [values[k] for k in y], "date": timeseries} | Format output for json response. | Below is the the instruction that describes the task:
### Input:
Format output for json response.
### Response:
def to_json(df, x, y, timeseries=False):
"""Format output for json response."""
values = {k: [] for k in y}
for i, row in df.iterrows():
for yy in y:
values[yy].append({
"x": row[x],
"y": row[yy]
})
return {"result": [values[k] for k in y], "date": timeseries} |
def heightmap_get_normal(
hm: np.ndarray, x: float, y: float, waterLevel: float
) -> Tuple[float, float, float]:
"""Return the map normal at given coordinates.
Args:
hm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions.
x (float): The x coordinate.
y (float): The y coordinate.
waterLevel (float): The heightmap is considered flat below this value.
Returns:
Tuple[float, float, float]: An (x, y, z) vector normal.
"""
cn = ffi.new("float[3]")
lib.TCOD_heightmap_get_normal(_heightmap_cdata(hm), x, y, cn, waterLevel)
return tuple(cn) | Return the map normal at given coordinates.
Args:
hm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions.
x (float): The x coordinate.
y (float): The y coordinate.
waterLevel (float): The heightmap is considered flat below this value.
Returns:
Tuple[float, float, float]: An (x, y, z) vector normal. | Below is the instruction that describes the task:
### Input:
Return the map normal at given coordinates.
Args:
hm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions.
x (float): The x coordinate.
y (float): The y coordinate.
waterLevel (float): The heightmap is considered flat below this value.
Returns:
Tuple[float, float, float]: An (x, y, z) vector normal.
### Response:
def heightmap_get_normal(
hm: np.ndarray, x: float, y: float, waterLevel: float
) -> Tuple[float, float, float]:
"""Return the map normal at given coordinates.
Args:
hm (numpy.ndarray): A numpy.ndarray formatted for heightmap functions.
x (float): The x coordinate.
y (float): The y coordinate.
waterLevel (float): The heightmap is considered flat below this value.
Returns:
Tuple[float, float, float]: An (x, y, z) vector normal.
"""
cn = ffi.new("float[3]")
lib.TCOD_heightmap_get_normal(_heightmap_cdata(hm), x, y, cn, waterLevel)
return tuple(cn) |
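A minimal calling sketch (assumes heightmap_get_normal is in scope; the map size is arbitrary). The heightmap is just a float32 NumPy array, and a completely flat map should yield a normal pointing straight up:
import numpy as np
hm = np.zeros((64, 64), dtype=np.float32)  # flat heightmap
nx, ny, nz = heightmap_get_normal(hm, 10.0, 10.0, waterLevel=0.0)
# Expect roughly (0.0, 0.0, 1.0) for a flat surface.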
def cipher(self):
""" Generate AES-cipher
:return: Crypto.Cipher.AES.AESCipher
"""
#cipher = pyAES.new(*self.mode().aes_args(), **self.mode().aes_kwargs())
cipher = Cipher(*self.mode().aes_args(), **self.mode().aes_kwargs())
return WAES.WAESCipher(cipher) | Generate AES-cipher
:return: Crypto.Cipher.AES.AESCipher | Below is the instruction that describes the task:
### Input:
Generate AES-cipher
:return: Crypto.Cipher.AES.AESCipher
### Response:
def cipher(self):
""" Generate AES-cipher
:return: Crypto.Cipher.AES.AESCipher
"""
#cipher = pyAES.new(*self.mode().aes_args(), **self.mode().aes_kwargs())
cipher = Cipher(*self.mode().aes_args(), **self.mode().aes_kwargs())
return WAES.WAESCipher(cipher) |
async def vcx_agent_provision(config: str) -> None:
"""
Provision an agent in the agency, populate configuration and wallet for this agent.
Example:
import json
enterprise_config = {
'agency_url': 'http://localhost:8080',
'agency_did': 'VsKV7grR1BUE29mG2Fm2kX',
'agency_verkey': "Hezce2UWMZ3wUhVkh2LfKSs8nDzWwzs2Win7EzNN3YaR",
'wallet_name': 'LIBVCX_SDK_WALLET',
'agent_seed': '00000000000000000000000001234561',
'enterprise_seed': '000000000000000000000000Trustee1',
'wallet_key': '1234'
}
vcx_config = await vcx_agent_provision(json.dumps(enterprise_config))
:param config: JSON configuration
:return: Configuration for vcx_init call.
"""
logger = logging.getLogger(__name__)
if not hasattr(vcx_agent_provision, "cb"):
logger.debug("vcx_agent_provision: Creating callback")
vcx_agent_provision.cb = create_cb(CFUNCTYPE(None, c_uint32, c_uint32, c_char_p))
c_config = c_char_p(config.encode('utf-8'))
result = await do_call('vcx_agent_provision_async',
c_config,
vcx_agent_provision.cb)
logger.debug("vcx_agent_provision completed")
return result.decode() | Provision an agent in the agency, populate configuration and wallet for this agent.
Example:
import json
enterprise_config = {
'agency_url': 'http://localhost:8080',
'agency_did': 'VsKV7grR1BUE29mG2Fm2kX',
'agency_verkey': "Hezce2UWMZ3wUhVkh2LfKSs8nDzWwzs2Win7EzNN3YaR",
'wallet_name': 'LIBVCX_SDK_WALLET',
'agent_seed': '00000000000000000000000001234561',
'enterprise_seed': '000000000000000000000000Trustee1',
'wallet_key': '1234'
}
vcx_config = await vcx_agent_provision(json.dumps(enterprise_config))
:param config: JSON configuration
:return: Configuration for vcx_init call. | Below is the instruction that describes the task:
### Input:
Provision an agent in the agency, populate configuration and wallet for this agent.
Example:
import json
enterprise_config = {
'agency_url': 'http://localhost:8080',
'agency_did': 'VsKV7grR1BUE29mG2Fm2kX',
'agency_verkey': "Hezce2UWMZ3wUhVkh2LfKSs8nDzWwzs2Win7EzNN3YaR",
'wallet_name': 'LIBVCX_SDK_WALLET',
'agent_seed': '00000000000000000000000001234561',
'enterprise_seed': '000000000000000000000000Trustee1',
'wallet_key': '1234'
}
vcx_config = await vcx_agent_provision(json.dumps(enterprise_config))
:param config: JSON configuration
:return: Configuration for vcx_init call.
### Response:
async def vcx_agent_provision(config: str) -> None:
"""
Provision an agent in the agency, populate configuration and wallet for this agent.
Example:
import json
enterprise_config = {
'agency_url': 'http://localhost:8080',
'agency_did': 'VsKV7grR1BUE29mG2Fm2kX',
'agency_verkey': "Hezce2UWMZ3wUhVkh2LfKSs8nDzWwzs2Win7EzNN3YaR",
'wallet_name': 'LIBVCX_SDK_WALLET',
'agent_seed': '00000000000000000000000001234561',
'enterprise_seed': '000000000000000000000000Trustee1',
'wallet_key': '1234'
}
vcx_config = await vcx_agent_provision(json.dumps(enterprise_config))
:param config: JSON configuration
:return: Configuration for vcx_init call.
"""
logger = logging.getLogger(__name__)
if not hasattr(vcx_agent_provision, "cb"):
logger.debug("vcx_agent_provision: Creating callback")
vcx_agent_provision.cb = create_cb(CFUNCTYPE(None, c_uint32, c_uint32, c_char_p))
c_config = c_char_p(config.encode('utf-8'))
result = await do_call('vcx_agent_provision_async',
c_config,
vcx_agent_provision.cb)
logger.debug("vcx_agent_provision completed")
return result.decode() |
def exists(hdfs_path, user=None):
"""
Return :obj:`True` if ``hdfs_path`` exists in the default HDFS.
"""
hostname, port, path = split(hdfs_path, user=user)
fs = hdfs_fs.hdfs(hostname, port)
retval = fs.exists(path)
fs.close()
return retval | Return :obj:`True` if ``hdfs_path`` exists in the default HDFS. | Below is the instruction that describes the task:
### Input:
Return :obj:`True` if ``hdfs_path`` exists in the default HDFS.
### Response:
def exists(hdfs_path, user=None):
"""
Return :obj:`True` if ``hdfs_path`` exists in the default HDFS.
"""
hostname, port, path = split(hdfs_path, user=user)
fs = hdfs_fs.hdfs(hostname, port)
retval = fs.exists(path)
fs.close()
return retval |
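Hypothetical usage (the path and user are made up; assumes exists is in scope and an HDFS namenode is reachable):
if exists('/user/alice/data.csv', user='alice'):
    print('file is there')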
def parser_functions(self) -> List['ParserFunction']:
"""Return a list of parser function objects."""
_lststr = self._lststr
_type_to_spans = self._type_to_spans
return [
ParserFunction(_lststr, _type_to_spans, span, 'ParserFunction')
for span in self._subspans('ParserFunction')] | Return a list of parser function objects. | Below is the instruction that describes the task:
### Input:
Return a list of parser function objects.
### Response:
def parser_functions(self) -> List['ParserFunction']:
"""Return a list of parser function objects."""
_lststr = self._lststr
_type_to_spans = self._type_to_spans
return [
ParserFunction(_lststr, _type_to_spans, span, 'ParserFunction')
for span in self._subspans('ParserFunction')] |
def loudest_time(self, start=0, duration=0):
"""Find the loudest time in the window given by start and duration
Returns frame number in context of entire track, not just the window.
:param integer start: Start frame
:param integer duration: Number of frames to consider from start
:returns: Frame number of loudest frame
:rtype: integer
"""
if duration == 0:
duration = self.sound.nframes
self.current_frame = start
arr = self.read_frames(duration)
# get the frame of the maximum amplitude
# different names for the same thing...
# max_amp_sample = a.argmax(axis=0)[a.max(axis=0).argmax()]
max_amp_sample = int(np.floor(arr.argmax()/2)) + start
return max_amp_sample | Find the loudest time in the window given by start and duration
Returns frame number in context of entire track, not just the window.
:param integer start: Start frame
:param integer duration: Number of frames to consider from start
:returns: Frame number of loudest frame
:rtype: integer | Below is the instruction that describes the task:
### Input:
Find the loudest time in the window given by start and duration
Returns frame number in context of entire track, not just the window.
:param integer start: Start frame
:param integer duration: Number of frames to consider from start
:returns: Frame number of loudest frame
:rtype: integer
### Response:
def loudest_time(self, start=0, duration=0):
"""Find the loudest time in the window given by start and duration
Returns frame number in context of entire track, not just the window.
:param integer start: Start frame
:param integer duration: Number of frames to consider from start
:returns: Frame number of loudest frame
:rtype: integer
"""
if duration == 0:
duration = self.sound.nframes
self.current_frame = start
arr = self.read_frames(duration)
# get the frame of the maximum amplitude
# different names for the same thing...
# max_amp_sample = a.argmax(axis=0)[a.max(axis=0).argmax()]
max_amp_sample = int(np.floor(arr.argmax()/2)) + start
return max_amp_sample |
def parse(self, rrstr):
# type: (bytes) -> int
'''
Parse a Rock Ridge POSIX File Attributes record out of a string.
Parameters:
rrstr - The string to parse the record out of.
Returns:
A string representing the RR version, either 1.09 or 1.12.
'''
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('PX record already initialized!')
(su_len, su_entry_version_unused, posix_file_mode_le, posix_file_mode_be,
posix_file_links_le, posix_file_links_be, posix_file_user_id_le,
posix_file_user_id_be, posix_file_group_id_le,
posix_file_group_id_be) = struct.unpack_from('=BBLLLLLLLL', rrstr[:38], 2)
# We assume that the caller has already checked the su_entry_version,
# so we don't bother.
if posix_file_mode_le != utils.swab_32bit(posix_file_mode_be):
raise pycdlibexception.PyCdlibInvalidISO('PX record big and little-endian file mode do not agree')
if posix_file_links_le != utils.swab_32bit(posix_file_links_be):
raise pycdlibexception.PyCdlibInvalidISO('PX record big and little-endian file links do not agree')
if posix_file_user_id_le != utils.swab_32bit(posix_file_user_id_be):
raise pycdlibexception.PyCdlibInvalidISO('PX record big and little-endian file user ID do not agree')
if posix_file_group_id_le != utils.swab_32bit(posix_file_group_id_be):
raise pycdlibexception.PyCdlibInvalidISO('PX record big and little-endian file group ID do not agree')
# In Rock Ridge 1.09 and 1.10, there is no serial number so the su_len
# is 36, while in Rock Ridge 1.12, there is an 8-byte serial number so
# su_len is 44.
if su_len == 36:
posix_file_serial_number_le = 0
elif su_len == 44:
(posix_file_serial_number_le,
posix_file_serial_number_be) = struct.unpack_from('=LL',
rrstr[:44], 36)
if posix_file_serial_number_le != utils.swab_32bit(posix_file_serial_number_be):
raise pycdlibexception.PyCdlibInvalidISO('PX record big and little-endian file serial number do not agree')
else:
raise pycdlibexception.PyCdlibInvalidISO('Invalid length on Rock Ridge PX record')
self.posix_file_mode = posix_file_mode_le
self.posix_file_links = posix_file_links_le
self.posix_user_id = posix_file_user_id_le
self.posix_group_id = posix_file_group_id_le
self.posix_serial_number = posix_file_serial_number_le
self._initialized = True
return su_len | Parse a Rock Ridge POSIX File Attributes record out of a string.
Parameters:
rrstr - The string to parse the record out of.
Returns:
A string representing the RR version, either 1.09 or 1.12. | Below is the instruction that describes the task:
### Input:
Parse a Rock Ridge POSIX File Attributes record out of a string.
Parameters:
rrstr - The string to parse the record out of.
Returns:
A string representing the RR version, either 1.09 or 1.12.
### Response:
def parse(self, rrstr):
# type: (bytes) -> int
'''
Parse a Rock Ridge POSIX File Attributes record out of a string.
Parameters:
rrstr - The string to parse the record out of.
Returns:
A string representing the RR version, either 1.09 or 1.12.
'''
if self._initialized:
raise pycdlibexception.PyCdlibInternalError('PX record already initialized!')
(su_len, su_entry_version_unused, posix_file_mode_le, posix_file_mode_be,
posix_file_links_le, posix_file_links_be, posix_file_user_id_le,
posix_file_user_id_be, posix_file_group_id_le,
posix_file_group_id_be) = struct.unpack_from('=BBLLLLLLLL', rrstr[:38], 2)
# We assume that the caller has already checked the su_entry_version,
# so we don't bother.
if posix_file_mode_le != utils.swab_32bit(posix_file_mode_be):
raise pycdlibexception.PyCdlibInvalidISO('PX record big and little-endian file mode do not agree')
if posix_file_links_le != utils.swab_32bit(posix_file_links_be):
raise pycdlibexception.PyCdlibInvalidISO('PX record big and little-endian file links do not agree')
if posix_file_user_id_le != utils.swab_32bit(posix_file_user_id_be):
raise pycdlibexception.PyCdlibInvalidISO('PX record big and little-endian file user ID do not agree')
if posix_file_group_id_le != utils.swab_32bit(posix_file_group_id_be):
raise pycdlibexception.PyCdlibInvalidISO('PX record big and little-endian file group ID do not agree')
# In Rock Ridge 1.09 and 1.10, there is no serial number so the su_len
# is 36, while in Rock Ridge 1.12, there is an 8-byte serial number so
# su_len is 44.
if su_len == 36:
posix_file_serial_number_le = 0
elif su_len == 44:
(posix_file_serial_number_le,
posix_file_serial_number_be) = struct.unpack_from('=LL',
rrstr[:44], 36)
if posix_file_serial_number_le != utils.swab_32bit(posix_file_serial_number_be):
raise pycdlibexception.PyCdlibInvalidISO('PX record big and little-endian file serial number do not agree')
else:
raise pycdlibexception.PyCdlibInvalidISO('Invalid length on Rock Ridge PX record')
self.posix_file_mode = posix_file_mode_le
self.posix_file_links = posix_file_links_le
self.posix_user_id = posix_file_user_id_le
self.posix_group_id = posix_file_group_id_le
self.posix_serial_number = posix_file_serial_number_le
self._initialized = True
return su_len |
def observe(self, event_name, func):
"""
event_name := {'created', 'modified', 'deleted'}, list, tuple
Attaches a function to run to a particular event. The function must be
unique to be removed cleanly. Alternatively, event_name can be a list/
tuple if any of the string possibilities to be added on multiple events
"""
if isinstance(event_name, list) or isinstance(event_name, tuple):
for name in event_name:
self.observe(name, func)
return
self.log(func.__name__, "attached to", event_name)
self._modify_event(event_name, 'append', func) | event_name := {'created', 'modified', 'deleted'}, list, tuple
Attaches a function to run to a particular event. The function must be
unique to be removed cleanly. Alternatively, event_name can be a list/
tuple if any of the string possibilities to be added on multiple events | Below is the instruction that describes the task:
### Input:
event_name := {'created', 'modified', 'deleted'}, list, tuple
Attaches a function to run to a particular event. The function must be
unique to be removed cleanly. Alternatively, event_name can be a list/
tuple if any of the string possibilities to be added on multiple events
### Response:
def observe(self, event_name, func):
"""
event_name := {'created', 'modified', 'deleted'}, list, tuple
Attaches a function to run to a particular event. The function must be
unique to be removed cleanly. Alternatively, event_name can be a list/
tuple if any of the string possibilities to be added on multiple events
"""
if isinstance(event_name, list) or isinstance(event_name, tuple):
for name in event_name:
self.observe(name, func)
return
self.log(func.__name__, "attached to", event_name)
self._modify_event(event_name, 'append', func) |
def is_longitudinal(self):
"""
Returns
-------
boolean :
longitudinal status of this project
"""
return len(self.events) > 0 and \
len(self.arm_nums) > 0 and \
len(self.arm_names) > 0 | Returns
-------
boolean :
longitudinal status of this project | Below is the instruction that describes the task:
### Input:
Returns
-------
boolean :
longitudinal status of this project
### Response:
def is_longitudinal(self):
"""
Returns
-------
boolean :
longitudinal status of this project
"""
return len(self.events) > 0 and \
len(self.arm_nums) > 0 and \
len(self.arm_names) > 0 |
def get_features(model_description_features):
"""Get features from a list of dictionaries
Parameters
----------
model_description_features : list of dictionaries
Examples
--------
>>> l = [{'StrokeCount': None}, \
{'ConstantPointCoordinates': \
[{'strokes': 4}, \
{'points_per_stroke': 81}, \
{'fill_empty_with': 0}, \
{'pen_down': False}] \
} \
]
>>> get_features(l)
[StrokeCount, ConstantPointCoordinates
- strokes: 4
- points per stroke: 81
- fill empty with: 0
- pen down feature: False
]
"""
return utils.get_objectlist(model_description_features,
config_key='features',
module=sys.modules[__name__]) | Get features from a list of dictionaries
Parameters
----------
model_description_features : list of dictionaries
Examples
--------
>>> l = [{'StrokeCount': None}, \
{'ConstantPointCoordinates': \
[{'strokes': 4}, \
{'points_per_stroke': 81}, \
{'fill_empty_with': 0}, \
{'pen_down': False}] \
} \
]
>>> get_features(l)
[StrokeCount, ConstantPointCoordinates
- strokes: 4
- points per stroke: 81
- fill empty with: 0
- pen down feature: False
] | Below is the instruction that describes the task:
### Input:
Get features from a list of dictionaries
Parameters
----------
model_description_features : list of dictionaries
Examples
--------
>>> l = [{'StrokeCount': None}, \
{'ConstantPointCoordinates': \
[{'strokes': 4}, \
{'points_per_stroke': 81}, \
{'fill_empty_with': 0}, \
{'pen_down': False}] \
} \
]
>>> get_features(l)
[StrokeCount, ConstantPointCoordinates
- strokes: 4
- points per stroke: 81
- fill empty with: 0
- pen down feature: False
]
### Response:
def get_features(model_description_features):
"""Get features from a list of dictionaries
Parameters
----------
model_description_features : list of dictionaries
Examples
--------
>>> l = [{'StrokeCount': None}, \
{'ConstantPointCoordinates': \
[{'strokes': 4}, \
{'points_per_stroke': 81}, \
{'fill_empty_with': 0}, \
{'pen_down': False}] \
} \
]
>>> get_features(l)
[StrokeCount, ConstantPointCoordinates
- strokes: 4
- points per stroke: 81
- fill empty with: 0
- pen down feature: False
]
"""
return utils.get_objectlist(model_description_features,
config_key='features',
module=sys.modules[__name__]) |
def _make_sampling_sequence(n):
# type: (int) -> List[int]
"""
Return a list containing the proposed call event sampling sequence.
Return events are paired with call events and not counted separately.
This is 0, 1, 2, ..., 4 plus 50, 100, 150, 200, etc.
The total list size is n.
"""
seq = list(range(5))
i = 50
while len(seq) < n:
seq.append(i)
i += 50
return seq | Return a list containing the proposed call event sampling sequence.
Return events are paired with call events and not counted separately.
This is 0, 1, 2, ..., 4 plus 50, 100, 150, 200, etc.
The total list size is n. | Below is the instruction that describes the task:
### Input:
Return a list containing the proposed call event sampling sequence.
Return events are paired with call events and not counted separately.
This is 0, 1, 2, ..., 4 plus 50, 100, 150, 200, etc.
The total list size is n.
### Response:
def _make_sampling_sequence(n):
# type: (int) -> List[int]
"""
Return a list containing the proposed call event sampling sequence.
Return events are paired with call events and not counted separately.
This is 0, 1, 2, ..., 4 plus 50, 100, 150, 200, etc.
The total list size is n.
"""
seq = list(range(5))
i = 50
while len(seq) < n:
seq.append(i)
i += 50
return seq |
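A quick worked example of the sequence this produces (assuming the function is in scope): the first five call events are sampled individually, then every 50th thereafter.
_make_sampling_sequence(8)
# -> [0, 1, 2, 3, 4, 50, 100, 150]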
def minute(self):
'''set unit to minute'''
self.magnification = 60
self._update(self.baseNumber, self.magnification)
return self | set unit to minute | Below is the instruction that describes the task:
### Input:
set unit to minute
### Response:
def minute(self):
'''set unit to minute'''
self.magnification = 60
self._update(self.baseNumber, self.magnification)
return self |
def run(self):
"""
updates the modules output.
Currently only time and tztime need to do this
"""
if self.update_time_value():
self.i3status.py3_wrapper.notify_update(self.module_name)
due_time = self.py3.time_in(sync_to=self.time_delta)
self.i3status.py3_wrapper.timeout_queue_add(self, due_time) | updates the modules output.
Currently only time and tztime need to do this | Below is the instruction that describes the task:
### Input:
updates the modules output.
Currently only time and tztime need to do this
### Response:
def run(self):
"""
updates the modules output.
Currently only time and tztime need to do this
"""
if self.update_time_value():
self.i3status.py3_wrapper.notify_update(self.module_name)
due_time = self.py3.time_in(sync_to=self.time_delta)
self.i3status.py3_wrapper.timeout_queue_add(self, due_time) |
def getObjectsInHouse(self, house):
""" Returns a list with all objects in a house. """
res = [obj for obj in self if house.hasObject(obj)]
return ObjectList(res) | Returns a list with all objects in a house. | Below is the instruction that describes the task:
### Input:
Returns a list with all objects in a house.
### Response:
def getObjectsInHouse(self, house):
""" Returns a list with all objects in a house. """
res = [obj for obj in self if house.hasObject(obj)]
return ObjectList(res) |
def result(self, s, a):
'''Result of applying an action to a state.'''
# result: boat on opposite side, and numbers of missioners and
# cannibals updated according to the move
if s[2] == 0:
return (s[0] - a[1][0], s[1] - a[1][1], 1)
else:
return (s[0] + a[1][0], s[1] + a[1][1], 0) | Result of applying an action to a state. | Below is the instruction that describes the task:
### Input:
Result of applying an action to a state.
### Response:
def result(self, s, a):
'''Result of applying an action to a state.'''
# result: boat on opposite side, and numbers of missioners and
# cannibals updated according to the move
if s[2] == 0:
return (s[0] - a[1][0], s[1] - a[1][1], 1)
else:
return (s[0] + a[1][0], s[1] + a[1][1], 0) |
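An illustrative trace of the method above (hypothetical action label; a state is (missionaries, cannibals, boat flag) on the tracked bank, and a[1] is the (missionaries, cannibals) pair carried by the boat):
# Assuming `problem` is an instance of the class above:
problem.result((3, 3, 0), ('1m1c', (1, 1)))   # -> (2, 2, 1): boat crosses to side 1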
def _check_inputs(self, operators, weights):
""" Check Inputs
This method checks that the input operators and weights are correctly
formatted
Parameters
----------
operators : list, tuple or np.ndarray
List of linear operator class instances
weights : list, tuple or np.ndarray
List of weights for combining the linear adjoint operator results
Returns
-------
tuple operators and weights
Raises
------
ValueError
If the number of weights does not match the number of operators
TypeError
If the individual weight values are not floats
"""
operators = self._check_type(operators)
for operator in operators:
if not hasattr(operator, 'op'):
raise ValueError('Operators must contain "op" method.')
if not hasattr(operator, 'adj_op'):
raise ValueError('Operators must contain "adj_op" method.')
operator.op = check_callable(operator.op)
operator.adj_op = check_callable(operator.adj_op)
if not isinstance(weights, type(None)):
weights = self._check_type(weights)
if weights.size != operators.size:
raise ValueError('The number of weights must match the '
'number of operators.')
if not np.issubdtype(weights.dtype, np.floating):
raise TypeError('The weights must be a list of float values.')
return operators, weights | Check Inputs
This method checks that the input operators and weights are correctly
formatted
Parameters
----------
operators : list, tuple or np.ndarray
List of linear operator class instances
weights : list, tuple or np.ndarray
List of weights for combining the linear adjoint operator results
Returns
-------
tuple operators and weights
Raises
------
ValueError
If the number of weights does not match the number of operators
TypeError
If the individual weight values are not floats | Below is the instruction that describes the task:
### Input:
Check Inputs
This method checks that the input operators and weights are correctly
formatted
Parameters
----------
operators : list, tuple or np.ndarray
List of linear operator class instances
weights : list, tuple or np.ndarray
List of weights for combining the linear adjoint operator results
Returns
-------
tuple operators and weights
Raises
------
ValueError
If the number of weights does not match the number of operators
TypeError
If the individual weight values are not floats
### Response:
def _check_inputs(self, operators, weights):
""" Check Inputs
This method checks that the input operators and weights are correctly
formatted
Parameters
----------
operators : list, tuple or np.ndarray
List of linear operator class instances
weights : list, tuple or np.ndarray
List of weights for combining the linear adjoint operator results
Returns
-------
tuple operators and weights
Raises
------
ValueError
If the number of weights does not match the number of operators
TypeError
If the individual weight values are not floats
"""
operators = self._check_type(operators)
for operator in operators:
if not hasattr(operator, 'op'):
raise ValueError('Operators must contain "op" method.')
if not hasattr(operator, 'adj_op'):
raise ValueError('Operators must contain "adj_op" method.')
operator.op = check_callable(operator.op)
operator.adj_op = check_callable(operator.adj_op)
if not isinstance(weights, type(None)):
weights = self._check_type(weights)
if weights.size != operators.size:
raise ValueError('The number of weights must match the '
'number of operators.')
if not np.issubdtype(weights.dtype, np.floating):
raise TypeError('The weights must be a list of float values.')
return operators, weights |
def create_dataset(self,
name,
x_img_size,
y_img_size,
z_img_size,
x_vox_res,
y_vox_res,
z_vox_res,
x_offset=0,
y_offset=0,
z_offset=0,
scaling_levels=0,
scaling_option=0,
dataset_description="",
is_public=0):
"""
Creates a dataset.
Arguments:
name (str): Name of dataset
x_img_size (int): max x coordinate of image size
y_img_size (int): max y coordinate of image size
z_img_size (int): max z coordinate of image size
x_vox_res (float): x voxel resolution
y_vox_res (float): y voxel resolution
z_vox_res (float): z voxel resolution
x_offset (int): x offset amount
y_offset (int): y offset amount
z_offset (int): z offset amount
scaling_levels (int): Level of resolution scaling
scaling_option (int): Z slices is 0 or Isotropic is 1
dataset_description (str): Your description of the dataset
is_public (int): 1 'true' or 0 'false' for viewability of data set
in public
Returns:
bool: True if dataset created, False if not
"""
return self.resources.create_dataset(name,
x_img_size,
y_img_size,
z_img_size,
x_vox_res,
y_vox_res,
z_vox_res,
x_offset,
y_offset,
z_offset,
scaling_levels,
scaling_option,
dataset_description,
is_public) | Creates a dataset.
Arguments:
name (str): Name of dataset
x_img_size (int): max x coordinate of image size
y_img_size (int): max y coordinate of image size
z_img_size (int): max z coordinate of image size
x_vox_res (float): x voxel resolution
y_vox_res (float): y voxel resolution
z_vox_res (float): z voxel resolution
x_offset (int): x offset amount
y_offset (int): y offset amount
z_offset (int): z offset amount
scaling_levels (int): Level of resolution scaling
scaling_option (int): Z slices is 0 or Isotropic is 1
dataset_description (str): Your description of the dataset
is_public (int): 1 'true' or 0 'false' for viewability of data set
in public
Returns:
bool: True if dataset created, False if not | Below is the instruction that describes the task:
### Input:
Creates a dataset.
Arguments:
name (str): Name of dataset
x_img_size (int): max x coordinate of image size
y_img_size (int): max y coordinate of image size
z_img_size (int): max z coordinate of image size
x_vox_res (float): x voxel resolution
y_vox_res (float): y voxel resolution
z_vox_res (float): z voxel resolution
x_offset (int): x offset amount
y_offset (int): y offset amount
z_offset (int): z offset amount
scaling_levels (int): Level of resolution scaling
scaling_option (int): Z slices is 0 or Isotropic is 1
dataset_description (str): Your description of the dataset
is_public (int): 1 'true' or 0 'false' for viewability of data set
in public
Returns:
bool: True if dataset created, False if not
### Response:
def create_dataset(self,
name,
x_img_size,
y_img_size,
z_img_size,
x_vox_res,
y_vox_res,
z_vox_res,
x_offset=0,
y_offset=0,
z_offset=0,
scaling_levels=0,
scaling_option=0,
dataset_description="",
is_public=0):
"""
Creates a dataset.
Arguments:
name (str): Name of dataset
x_img_size (int): max x coordinate of image size
y_img_size (int): max y coordinate of image size
z_img_size (int): max z coordinate of image size
x_vox_res (float): x voxel resolution
y_vox_res (float): y voxel resolution
z_vox_res (float): z voxel resolution
x_offset (int): x offset amount
y_offset (int): y offset amount
z_offset (int): z offset amount
scaling_levels (int): Level of resolution scaling
scaling_option (int): Z slices is 0 or Isotropic is 1
dataset_description (str): Your description of the dataset
is_public (int): 1 'true' or 0 'false' for viewability of data set
in public
Returns:
bool: True if dataset created, False if not
"""
return self.resources.create_dataset(name,
x_img_size,
y_img_size,
z_img_size,
x_vox_res,
y_vox_res,
z_vox_res,
x_offset,
y_offset,
z_offset,
scaling_levels,
scaling_option,
dataset_description,
is_public) |
def create_tags_with_concatenated_css_classes(tags):
"""Function that creates <mark> tags such that they are not overlapping.
In order to do this, it concatenates the css classes and stores the concatenated
result in new tags.
"""
current_classes = set()
result = []
for pos, group in group_tags_at_same_position(tags):
opening, closing = get_opening_closing_tags(group)
# handle closing tags at current position
closing_added = False
if len(closing) > 0:
closing_tag = Tag(pos, False, '')
for tag in closing:
current_classes.remove(tag.css_class)
result.append(closing_tag)
closing_added = True
# handle opening tags at current position
opening_added = False
if len(opening) > 0:
# handle the begin of an overlap
if not closing_added and len(current_classes) > 0:
result.append(Tag(pos, False, ''))
for tag in opening:
current_classes.add(tag.css_class)
opening_tag = Tag(pos, True, ' '.join(sorted(current_classes)))
result.append(opening_tag)
opening_added = True
# handle the end of an overlap
if closing_added and not opening_added and len(current_classes) > 0:
opening_tag = Tag(pos, True, ' '.join(sorted(current_classes)))
result.append(opening_tag)
return result | Function that creates <mark> tags such that they are not overlapping.
In order to do this, it concatenates the css classes and stores the concatenated
result in new tags. | Below is the instruction that describes the task:
### Input:
Function that creates <mark> tags such that they are not overlapping.
In order to do this, it concatenates the css classes and stores the concatenated
result in new tags.
### Response:
def create_tags_with_concatenated_css_classes(tags):
"""Function that creates <mark> tags such that they are not overlapping.
In order to do this, it concatenates the css classes and stores the concatenated
result in new tags.
"""
current_classes = set()
result = []
for pos, group in group_tags_at_same_position(tags):
opening, closing = get_opening_closing_tags(group)
# handle closing tags at current position
closing_added = False
if len(closing) > 0:
closing_tag = Tag(pos, False, '')
for tag in closing:
current_classes.remove(tag.css_class)
result.append(closing_tag)
closing_added = True
# handle opening tags at current position
opening_added = False
if len(opening) > 0:
# handle the begin of an overlap
if not closing_added and len(current_classes) > 0:
result.append(Tag(pos, False, ''))
for tag in opening:
current_classes.add(tag.css_class)
opening_tag = Tag(pos, True, ' '.join(sorted(current_classes)))
result.append(opening_tag)
opening_added = True
# handle the end of an overlap
if closing_added and not opening_added and len(current_classes) > 0:
opening_tag = Tag(pos, True, ' '.join(sorted(current_classes)))
result.append(opening_tag)
return result |
async def fetch_block(self, request):
"""Fetches a specific block from the validator, specified by id.
Request:
path:
- block_id: The 128-character id of the block to be fetched
Response:
data: A JSON object with the data from the fully expanded Block
link: The link to this exact query
"""
error_traps = [error_handlers.BlockNotFoundTrap]
block_id = request.match_info.get('block_id', '')
self._validate_id(block_id)
response = await self._query_validator(
Message.CLIENT_BLOCK_GET_BY_ID_REQUEST,
client_block_pb2.ClientBlockGetResponse,
client_block_pb2.ClientBlockGetByIdRequest(block_id=block_id),
error_traps)
return self._wrap_response(
request,
data=self._expand_block(response['block']),
metadata=self._get_metadata(request, response)) | Fetches a specific block from the validator, specified by id.
Request:
path:
- block_id: The 128-character id of the block to be fetched
Response:
data: A JSON object with the data from the fully expanded Block
    link: The link to this exact query | Below is the instruction that describes the task:
### Input:
Fetches a specific block from the validator, specified by id.
Request:
path:
- block_id: The 128-character id of the block to be fetched
Response:
data: A JSON object with the data from the fully expanded Block
link: The link to this exact query
### Response:
async def fetch_block(self, request):
"""Fetches a specific block from the validator, specified by id.
Request:
path:
- block_id: The 128-character id of the block to be fetched
Response:
data: A JSON object with the data from the fully expanded Block
link: The link to this exact query
"""
error_traps = [error_handlers.BlockNotFoundTrap]
block_id = request.match_info.get('block_id', '')
self._validate_id(block_id)
response = await self._query_validator(
Message.CLIENT_BLOCK_GET_BY_ID_REQUEST,
client_block_pb2.ClientBlockGetResponse,
client_block_pb2.ClientBlockGetByIdRequest(block_id=block_id),
error_traps)
return self._wrap_response(
request,
data=self._expand_block(response['block']),
metadata=self._get_metadata(request, response)) |
def send(self, line):
'''send some bytes'''
line = line.strip()
if line == ".":
self.stop()
return
mav = self.master.mav
if line != '+++':
line += "\r\n"
buf = [ord(x) for x in line]
buf.extend([0]*(70-len(buf)))
flags = mavutil.mavlink.SERIAL_CONTROL_FLAG_RESPOND
flags |= mavutil.mavlink.SERIAL_CONTROL_FLAG_MULTI
flags |= mavutil.mavlink.SERIAL_CONTROL_FLAG_EXCLUSIVE
mav.serial_control_send(self.serial_settings.port,
flags,
0, self.serial_settings.baudrate,
                                len(line), buf) | send some bytes | Below is the instruction that describes the task:
### Input:
send some bytes
### Response:
def send(self, line):
'''send some bytes'''
line = line.strip()
if line == ".":
self.stop()
return
mav = self.master.mav
if line != '+++':
line += "\r\n"
buf = [ord(x) for x in line]
buf.extend([0]*(70-len(buf)))
flags = mavutil.mavlink.SERIAL_CONTROL_FLAG_RESPOND
flags |= mavutil.mavlink.SERIAL_CONTROL_FLAG_MULTI
flags |= mavutil.mavlink.SERIAL_CONTROL_FLAG_EXCLUSIVE
mav.serial_control_send(self.serial_settings.port,
flags,
0, self.serial_settings.baudrate,
len(line), buf) |
def filename(value):
'''
Remove everything that would affect paths in the filename
:param value:
:return:
'''
    return re.sub('[^a-zA-Z0-9._ -]', '', os.path.basename(InputSanitizer.trim(value))) | Remove everything that would affect paths in the filename
:param value:
:return: | Below is the instruction that describes the task:
### Input:
Remove everything that would affect paths in the filename
:param value:
:return:
### Response:
def filename(value):
'''
Remove everything that would affect paths in the filename
:param value:
:return:
'''
    return re.sub('[^a-zA-Z0-9._ -]', '', os.path.basename(InputSanitizer.trim(value)))
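A minimal usage sketch for the sanitizer above (illustrative only; it assumes filename() is exposed alongside InputSanitizer.trim and that trim behaves like str.strip, and it reflects the corrected character class in which '-' is literal):

safe = filename("  report (final)?.pdf  ")
# -> 'report final.pdf'  (parentheses and '?' are stripped; letters, digits,
#    '.', '_', ' ' and '-' survive, and os.path.basename drops any directory part)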
def delete_table_rate_shipping_by_id(cls, table_rate_shipping_id, **kwargs):
"""Delete TableRateShipping
Delete an instance of TableRateShipping by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_table_rate_shipping_by_id(table_rate_shipping_id, async=True)
>>> result = thread.get()
:param async bool
:param str table_rate_shipping_id: ID of tableRateShipping to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._delete_table_rate_shipping_by_id_with_http_info(table_rate_shipping_id, **kwargs)
else:
(data) = cls._delete_table_rate_shipping_by_id_with_http_info(table_rate_shipping_id, **kwargs)
return data | Delete TableRateShipping
Delete an instance of TableRateShipping by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_table_rate_shipping_by_id(table_rate_shipping_id, async=True)
>>> result = thread.get()
:param async bool
:param str table_rate_shipping_id: ID of tableRateShipping to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread. | Below is the instruction that describes the task:
### Input:
Delete TableRateShipping
Delete an instance of TableRateShipping by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_table_rate_shipping_by_id(table_rate_shipping_id, async=True)
>>> result = thread.get()
:param async bool
:param str table_rate_shipping_id: ID of tableRateShipping to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
### Response:
def delete_table_rate_shipping_by_id(cls, table_rate_shipping_id, **kwargs):
"""Delete TableRateShipping
Delete an instance of TableRateShipping by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_table_rate_shipping_by_id(table_rate_shipping_id, async=True)
>>> result = thread.get()
:param async bool
:param str table_rate_shipping_id: ID of tableRateShipping to delete. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._delete_table_rate_shipping_by_id_with_http_info(table_rate_shipping_id, **kwargs)
else:
(data) = cls._delete_table_rate_shipping_by_id_with_http_info(table_rate_shipping_id, **kwargs)
return data |
def to_header(self):
"""Converts the object back into an HTTP header."""
ranges = []
for begin, end in self.ranges:
if end is None:
ranges.append("%s-" % begin if begin >= 0 else str(begin))
else:
ranges.append("%s-%s" % (begin, end - 1))
return "%s=%s" % (self.units, ",".join(ranges)) | Converts the object back into an HTTP header. | Below is the the instruction that describes the task:
### Input:
Converts the object back into an HTTP header.
### Response:
def to_header(self):
"""Converts the object back into an HTTP header."""
ranges = []
for begin, end in self.ranges:
if end is None:
ranges.append("%s-" % begin if begin >= 0 else str(begin))
else:
ranges.append("%s-%s" % (begin, end - 1))
return "%s=%s" % (self.units, ",".join(ranges)) |
def yum_install_from_url(pkg_name, url):
""" installs a pkg from a url
p pkg_name: the name of the package to install
p url: the full URL for the rpm package
"""
if is_package_installed(distribution='el', pkg=pkg_name) is False:
log_green(
"installing %s from %s" % (pkg_name, url))
with settings(hide('warnings', 'running', 'stdout', 'stderr'),
warn_only=True, capture=True):
result = sudo("rpm -i %s" % url)
if result.return_code == 0:
return True
elif result.return_code == 1:
return False
else: # print error to user
print(result)
raise SystemExit() | installs a pkg from a url
p pkg_name: the name of the package to install
    p url: the full URL for the rpm package | Below is the instruction that describes the task:
### Input:
installs a pkg from a url
p pkg_name: the name of the package to install
p url: the full URL for the rpm package
### Response:
def yum_install_from_url(pkg_name, url):
""" installs a pkg from a url
p pkg_name: the name of the package to install
p url: the full URL for the rpm package
"""
if is_package_installed(distribution='el', pkg=pkg_name) is False:
log_green(
"installing %s from %s" % (pkg_name, url))
with settings(hide('warnings', 'running', 'stdout', 'stderr'),
warn_only=True, capture=True):
result = sudo("rpm -i %s" % url)
if result.return_code == 0:
return True
elif result.return_code == 1:
return False
else: # print error to user
print(result)
raise SystemExit() |
def islitlet_progress(islitlet, islitlet_max):
"""Auxiliary function to print out progress in loop of slitlets.
Parameters
----------
islitlet : int
Current slitlet number.
islitlet_max : int
Maximum slitlet number.
"""
if islitlet % 10 == 0:
cout = str(islitlet // 10)
else:
cout = '.'
sys.stdout.write(cout)
if islitlet == islitlet_max:
sys.stdout.write('\n')
sys.stdout.flush() | Auxiliary function to print out progress in loop of slitlets.
Parameters
----------
islitlet : int
Current slitlet number.
islitlet_max : int
        Maximum slitlet number. | Below is the instruction that describes the task:
### Input:
Auxiliary function to print out progress in loop of slitlets.
Parameters
----------
islitlet : int
Current slitlet number.
islitlet_max : int
Maximum slitlet number.
### Response:
def islitlet_progress(islitlet, islitlet_max):
"""Auxiliary function to print out progress in loop of slitlets.
Parameters
----------
islitlet : int
Current slitlet number.
islitlet_max : int
Maximum slitlet number.
"""
if islitlet % 10 == 0:
cout = str(islitlet // 10)
else:
cout = '.'
sys.stdout.write(cout)
if islitlet == islitlet_max:
sys.stdout.write('\n')
sys.stdout.flush() |
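A short usage sketch of the progress helper (hypothetical loop bounds):

for islitlet in range(1, 26):
    islitlet_progress(islitlet, islitlet_max=25)
# Prints a single strip: a digit at every 10th slitlet, a dot otherwise,
# i.e. '.........1.........2.....' followed by a newline at islitlet 25.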
def age(self, id):
""" Returns the age of the cache entry, in days.
"""
path = self.hash(id)
if os.path.exists(path):
modified = datetime.datetime.fromtimestamp(os.stat(path)[8])
age = datetime.datetime.today() - modified
return age.days
else:
        return 0 | Returns the age of the cache entry, in days. | Below is the instruction that describes the task:
### Input:
Returns the age of the cache entry, in days.
### Response:
def age(self, id):
""" Returns the age of the cache entry, in days.
"""
path = self.hash(id)
if os.path.exists(path):
modified = datetime.datetime.fromtimestamp(os.stat(path)[8])
age = datetime.datetime.today() - modified
return age.days
else:
return 0 |
def gen_table(self, inner_widths, inner_heights, outer_widths):
"""Combine everything and yield every line of the entire table with borders.
:param iter inner_widths: List of widths (no padding) for each column.
:param iter inner_heights: List of heights (no padding) for each row.
:param iter outer_widths: List of widths (with padding) for each column.
:return:
"""
for i, row in enumerate(self.table_data):
# Yield the row line by line (e.g. multi-line rows).
for line in self.gen_row_lines(row, 'row', inner_widths, inner_heights[i]):
yield line
# Yield heading separator.
if i == 0:
yield self.horizontal_border(None, outer_widths) | Combine everything and yield every line of the entire table with borders.
:param iter inner_widths: List of widths (no padding) for each column.
:param iter inner_heights: List of heights (no padding) for each row.
:param iter outer_widths: List of widths (with padding) for each column.
    :return: | Below is the instruction that describes the task:
### Input:
Combine everything and yield every line of the entire table with borders.
:param iter inner_widths: List of widths (no padding) for each column.
:param iter inner_heights: List of heights (no padding) for each row.
:param iter outer_widths: List of widths (with padding) for each column.
:return:
### Response:
def gen_table(self, inner_widths, inner_heights, outer_widths):
"""Combine everything and yield every line of the entire table with borders.
:param iter inner_widths: List of widths (no padding) for each column.
:param iter inner_heights: List of heights (no padding) for each row.
:param iter outer_widths: List of widths (with padding) for each column.
:return:
"""
for i, row in enumerate(self.table_data):
# Yield the row line by line (e.g. multi-line rows).
for line in self.gen_row_lines(row, 'row', inner_widths, inner_heights[i]):
yield line
# Yield heading separator.
if i == 0:
yield self.horizontal_border(None, outer_widths) |
def _subset(subset, superset):
"""True if subset is a subset of superset.
:param dict subset: subset to compare.
:param dict superset: superset to compare.
    :return: True iff all pairs (key, value) of subset are in superset.
:rtype: bool
"""
result = True
for k in subset:
result = k in superset and subset[k] == superset[k]
if not result:
break
return result | True if subset is a subset of superset.
:param dict subset: subset to compare.
:param dict superset: superset to compare.
:return: True iff all pairs (key, value) of subset are in superset.
:rtype: bool | Below is the instruction that describes the task:
### Input:
True if subset is a subset of superset.
:param dict subset: subset to compare.
:param dict superset: superset to compare.
:return: True iff all pairs (key, value) of subset are in superset.
:rtype: bool
### Response:
def _subset(subset, superset):
"""True if subset is a subset of superset.
:param dict subset: subset to compare.
:param dict superset: superset to compare.
    :return: True iff all pairs (key, value) of subset are in superset.
:rtype: bool
"""
result = True
for k in subset:
result = k in superset and subset[k] == superset[k]
if not result:
break
return result |
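Quick checks of the helper above (illustrative values):

_subset({'fstype': 'ext4'}, {'fstype': 'ext4', 'device': '/dev/sda1'})  # True
_subset({'fstype': 'xfs'}, {'fstype': 'ext4', 'device': '/dev/sda1'})   # False
_subset({}, {'anything': 1})                                            # True (vacuously)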
def forget_fact(term):
"""
Forgets a fact by removing it from the database
"""
logger.info('Removing fact %s', term)
db.facts.remove({'term': term_regex(term)})
    return random.choice(ACKS) | Forgets a fact by removing it from the database | Below is the instruction that describes the task:
### Input:
Forgets a fact by removing it from the database
### Response:
def forget_fact(term):
"""
Forgets a fact by removing it from the database
"""
logger.info('Removing fact %s', term)
db.facts.remove({'term': term_regex(term)})
return random.choice(ACKS) |
def _render_content(self, content, **settings):
"""
Perform widget rendering, but do not print anything.
"""
result = []
columns = settings[self.SETTING_COLUMNS]
# Format each table cell into string.
(columns, content) = self.table_format(columns, content)
# Enumerate each table row.
if settings[self.SETTING_FLAG_ENUMERATE]:
(columns, content) = self.table_enumerate(columns, content)
# Calculate the dimensions of each table column.
dimensions = self.table_measure(columns, content)
# Display table header.
sb = {k: settings[k] for k in (self.SETTING_BORDER_STYLE, self.SETTING_BORDER_FORMATING)}
result.append(self.fmt_border(dimensions, 't', **sb))
if settings[self.SETTING_FLAG_HEADER]:
s = {k: settings[k] for k in (self.SETTING_FLAG_PLAIN, self.SETTING_BORDER_STYLE, self.SETTING_BORDER_FORMATING)}
s[self.SETTING_TEXT_FORMATING] = settings[self.SETTING_HEADER_FORMATING]
result.append(self.fmt_row_header(columns, dimensions, **s))
result.append(self.fmt_border(dimensions, 'm', **sb))
# Display table body.
for row in content:
s = {k: settings[k] for k in (self.SETTING_FLAG_PLAIN, self.SETTING_BORDER_STYLE, self.SETTING_BORDER_FORMATING)}
s[self.SETTING_TEXT_FORMATING] = settings[self.SETTING_TEXT_FORMATING]
result.append(self.fmt_row(columns, dimensions, row, **s))
# Display table footer
result.append(self.fmt_border(dimensions, 'b', **sb))
        return result | Perform widget rendering, but do not print anything. | Below is the instruction that describes the task:
### Input:
Perform widget rendering, but do not print anything.
### Response:
def _render_content(self, content, **settings):
"""
Perform widget rendering, but do not print anything.
"""
result = []
columns = settings[self.SETTING_COLUMNS]
# Format each table cell into string.
(columns, content) = self.table_format(columns, content)
# Enumerate each table row.
if settings[self.SETTING_FLAG_ENUMERATE]:
(columns, content) = self.table_enumerate(columns, content)
# Calculate the dimensions of each table column.
dimensions = self.table_measure(columns, content)
# Display table header.
sb = {k: settings[k] for k in (self.SETTING_BORDER_STYLE, self.SETTING_BORDER_FORMATING)}
result.append(self.fmt_border(dimensions, 't', **sb))
if settings[self.SETTING_FLAG_HEADER]:
s = {k: settings[k] for k in (self.SETTING_FLAG_PLAIN, self.SETTING_BORDER_STYLE, self.SETTING_BORDER_FORMATING)}
s[self.SETTING_TEXT_FORMATING] = settings[self.SETTING_HEADER_FORMATING]
result.append(self.fmt_row_header(columns, dimensions, **s))
result.append(self.fmt_border(dimensions, 'm', **sb))
# Display table body.
for row in content:
s = {k: settings[k] for k in (self.SETTING_FLAG_PLAIN, self.SETTING_BORDER_STYLE, self.SETTING_BORDER_FORMATING)}
s[self.SETTING_TEXT_FORMATING] = settings[self.SETTING_TEXT_FORMATING]
result.append(self.fmt_row(columns, dimensions, row, **s))
# Display table footer
result.append(self.fmt_border(dimensions, 'b', **sb))
return result |
def get_arg_type_descriptors(self):
"""
The parameter type descriptor list for a method, or None for a
field. Type descriptors are shorthand identifiers for the
builtin java types.
"""
if not self.is_method:
return tuple()
tp = _typeseq(self.get_descriptor())
tp = _typeseq(tp[0][1:-1])
return tp | The parameter type descriptor list for a method, or None for a
field. Type descriptors are shorthand identifiers for the
builtin java types. | Below is the instruction that describes the task:
### Input:
The parameter type descriptor list for a method, or None for a
field. Type descriptors are shorthand identifiers for the
builtin java types.
### Response:
def get_arg_type_descriptors(self):
"""
The parameter type descriptor list for a method, or None for a
field. Type descriptors are shorthand identifiers for the
builtin java types.
"""
if not self.is_method:
return tuple()
tp = _typeseq(self.get_descriptor())
tp = _typeseq(tp[0][1:-1])
return tp |
def add_update_resources(self, resources, ignore_datasetid=False):
# type: (List[Union[hdx.data.resource.Resource,Dict,str]], bool) -> None
"""Add new or update existing resources with new metadata to the dataset
Args:
resources (List[Union[hdx.data.resource.Resource,Dict,str]]): A list of either resource ids or resources metadata from either Resource objects or dictionaries
ignore_datasetid (bool): Whether to ignore dataset id in the resource. Defaults to False.
Returns:
None
"""
if not isinstance(resources, list):
raise HDXError('Resources should be a list!')
for resource in resources:
self.add_update_resource(resource, ignore_datasetid) | Add new or update existing resources with new metadata to the dataset
Args:
resources (List[Union[hdx.data.resource.Resource,Dict,str]]): A list of either resource ids or resources metadata from either Resource objects or dictionaries
ignore_datasetid (bool): Whether to ignore dataset id in the resource. Defaults to False.
Returns:
    None | Below is the instruction that describes the task:
### Input:
Add new or update existing resources with new metadata to the dataset
Args:
resources (List[Union[hdx.data.resource.Resource,Dict,str]]): A list of either resource ids or resources metadata from either Resource objects or dictionaries
ignore_datasetid (bool): Whether to ignore dataset id in the resource. Defaults to False.
Returns:
None
### Response:
def add_update_resources(self, resources, ignore_datasetid=False):
# type: (List[Union[hdx.data.resource.Resource,Dict,str]], bool) -> None
"""Add new or update existing resources with new metadata to the dataset
Args:
resources (List[Union[hdx.data.resource.Resource,Dict,str]]): A list of either resource ids or resources metadata from either Resource objects or dictionaries
ignore_datasetid (bool): Whether to ignore dataset id in the resource. Defaults to False.
Returns:
None
"""
if not isinstance(resources, list):
raise HDXError('Resources should be a list!')
for resource in resources:
self.add_update_resource(resource, ignore_datasetid) |
def bump_version(project, source, force_init): # type: (str, str, bool) -> int
"""
Entry point
:return:
"""
file_opener = FileOpener()
# logger.debug("Starting version jiggler...")
jiggler = JiggleVersion(project, source, file_opener, force_init)
logger.debug(
"Current, next : {0} -> {1} : {2}".format(
jiggler.current_version, jiggler.version, jiggler.schema
)
)
if not jiggler.version_finder.validate_current_versions():
logger.debug(unicode(jiggler.version_finder.all_current_versions()))
logger.error("Versions not in sync, won't continue")
die(-1, "Versions not in sync, won't continue")
changed = jiggler.jiggle_all()
logger.debug("Changed {0} files".format(changed))
return changed | Entry point
    :return: | Below is the instruction that describes the task:
### Input:
Entry point
:return:
### Response:
def bump_version(project, source, force_init): # type: (str, str, bool) -> int
"""
Entry point
:return:
"""
file_opener = FileOpener()
# logger.debug("Starting version jiggler...")
jiggler = JiggleVersion(project, source, file_opener, force_init)
logger.debug(
"Current, next : {0} -> {1} : {2}".format(
jiggler.current_version, jiggler.version, jiggler.schema
)
)
if not jiggler.version_finder.validate_current_versions():
logger.debug(unicode(jiggler.version_finder.all_current_versions()))
logger.error("Versions not in sync, won't continue")
die(-1, "Versions not in sync, won't continue")
changed = jiggler.jiggle_all()
logger.debug("Changed {0} files".format(changed))
return changed |
def loadTargetsFromFile(self, filename, cols = None, everyNrows = 1,
delim = ' ', checkEven = 1):
"""
Loads targets from file.
"""
self.targets = self.loadVectors(filename, cols, everyNrows,
                                        delim, checkEven) | Loads targets from file. | Below is the instruction that describes the task:
### Input:
Loads targets from file.
### Response:
def loadTargetsFromFile(self, filename, cols = None, everyNrows = 1,
delim = ' ', checkEven = 1):
"""
Loads targets from file.
"""
self.targets = self.loadVectors(filename, cols, everyNrows,
delim, checkEven) |
def get_time_delta(time_string: str) -> timedelta:
"""
Takes a time string (1 hours, 10 days, etc.) and returns
a python timedelta object
:param time_string: the time value to convert to a timedelta
:type time_string: str
:returns: datetime.timedelta for relative time
:type datetime.timedelta
"""
rel_time: Pattern = re.compile(
pattern=r"((?P<hours>\d+?)\s+hour)?((?P<minutes>\d+?)\s+minute)?((?P<seconds>\d+?)\s+second)?((?P<days>\d+?)\s+day)?",
# noqa
flags=re.IGNORECASE,
)
parts: Optional[Match[AnyStr]] = rel_time.match(string=time_string)
if not parts:
raise Exception(f"Invalid relative time: {time_string}")
# https://docs.python.org/3/library/re.html#re.Match.groupdict
parts: Dict[str, str] = parts.groupdict()
time_params = {}
    if all(value is None for value in parts.values()):
raise Exception(f"Invalid relative time: {time_string}")
for time_unit, magnitude in parts.items():
if magnitude:
time_params[time_unit]: int = int(magnitude)
return timedelta(**time_params) | Takes a time string (1 hours, 10 days, etc.) and returns
a python timedelta object
:param time_string: the time value to convert to a timedelta
:type time_string: str
:returns: datetime.timedelta for relative time
    :type datetime.timedelta | Below is the instruction that describes the task:
### Input:
Takes a time string (1 hours, 10 days, etc.) and returns
a python timedelta object
:param time_string: the time value to convert to a timedelta
:type time_string: str
:returns: datetime.timedelta for relative time
:type datetime.timedelta
### Response:
def get_time_delta(time_string: str) -> timedelta:
"""
Takes a time string (1 hours, 10 days, etc.) and returns
a python timedelta object
:param time_string: the time value to convert to a timedelta
:type time_string: str
:returns: datetime.timedelta for relative time
:type datetime.timedelta
"""
rel_time: Pattern = re.compile(
pattern=r"((?P<hours>\d+?)\s+hour)?((?P<minutes>\d+?)\s+minute)?((?P<seconds>\d+?)\s+second)?((?P<days>\d+?)\s+day)?",
# noqa
flags=re.IGNORECASE,
)
parts: Optional[Match[AnyStr]] = rel_time.match(string=time_string)
if not parts:
raise Exception(f"Invalid relative time: {time_string}")
# https://docs.python.org/3/library/re.html#re.Match.groupdict
parts: Dict[str, str] = parts.groupdict()
time_params = {}
    if all(value is None for value in parts.values()):
raise Exception(f"Invalid relative time: {time_string}")
for time_unit, magnitude in parts.items():
if magnitude:
time_params[time_unit]: int = int(magnitude)
return timedelta(**time_params) |
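A usage sketch (single-unit strings are the safe input shape; because the unit groups are matched in a fixed hours/minutes/seconds/days order against the start of the string, strings that combine several units may not parse the way you expect):

get_time_delta("10 days")     # -> timedelta(days=10)
get_time_delta("2 hours")     # -> timedelta(hours=2)
get_time_delta("90 seconds")  # -> timedelta(seconds=90)
get_time_delta("soon")        # raises Exception('Invalid relative time: soon')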
def seriesshape(self):
"""Shape of the whole time series (time being the first dimension)."""
seriesshape = [len(hydpy.pub.timegrids.init)]
seriesshape.extend(self.shape)
    return tuple(seriesshape) | Shape of the whole time series (time being the first dimension). | Below is the instruction that describes the task:
### Input:
Shape of the whole time series (time being the first dimension).
### Response:
def seriesshape(self):
"""Shape of the whole time series (time being the first dimension)."""
seriesshape = [len(hydpy.pub.timegrids.init)]
seriesshape.extend(self.shape)
return tuple(seriesshape) |
def _convert_to_dict(data):
"""
Convert `data` to dictionary.
    Tries to make sense of multidimensional arrays.
Args:
data: List/dict/tuple of variable dimension.
Returns:
dict: If the data can be converted to dictionary.
Raises:
MetaParsingException: When the data are unconvertible to dict.
"""
if isinstance(data, dict):
return data
if isinstance(data, list) or isinstance(data, tuple):
if _all_correct_list(data):
return dict(data)
else:
data = zip(data[::2], data[1::2])
return dict(data)
else:
raise MetaParsingException(
"Can't decode provided metadata - unknown structure."
) | Convert `data` to dictionary.
Tries to make sense of multidimensional arrays.
Args:
data: List/dict/tuple of variable dimension.
Returns:
dict: If the data can be converted to dictionary.
Raises:
        MetaParsingException: When the data are unconvertible to dict. | Below is the instruction that describes the task:
### Input:
Convert `data` to dictionary.
Tries to make sense of multidimensional arrays.
Args:
data: List/dict/tuple of variable dimension.
Returns:
dict: If the data can be converted to dictionary.
Raises:
MetaParsingException: When the data are unconvertible to dict.
### Response:
def _convert_to_dict(data):
"""
Convert `data` to dictionary.
Tries to get sense in multidimensional arrays.
Args:
data: List/dict/tuple of variable dimension.
Returns:
dict: If the data can be converted to dictionary.
Raises:
MetaParsingException: When the data are unconvertible to dict.
"""
if isinstance(data, dict):
return data
if isinstance(data, list) or isinstance(data, tuple):
if _all_correct_list(data):
return dict(data)
else:
data = zip(data[::2], data[1::2])
return dict(data)
else:
raise MetaParsingException(
"Can't decode provided metadata - unknown structure."
) |
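Illustrative inputs (assuming _all_correct_list returns True only for a list made entirely of two-element lists):

_convert_to_dict({'a': 1})               # -> {'a': 1} (already a dict)
_convert_to_dict([['a', 1], ['b', 2]])   # -> {'a': 1, 'b': 2} via dict(pairs)
_convert_to_dict(['a', 1, 'b', 2])       # -> {'a': 1, 'b': 2} by pairing neighbours
_convert_to_dict("a=1")                  # raises MetaParsingException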
def match(self, fsys_view):
'''
Compare potentially partial criteria against built filesystems entry dictionary
'''
evalue_dict = fsys_view[1]
for key, value in six.viewitems(self.criteria):
if key in evalue_dict:
if evalue_dict[key] != value:
return False
else:
return False
    return True | Compare potentially partial criteria against built filesystems entry dictionary | Below is the instruction that describes the task:
### Input:
Compare potentially partial criteria against built filesystems entry dictionary
### Response:
def match(self, fsys_view):
'''
Compare potentially partial criteria against built filesystems entry dictionary
'''
evalue_dict = fsys_view[1]
for key, value in six.viewitems(self.criteria):
if key in evalue_dict:
if evalue_dict[key] != value:
return False
else:
return False
return True |
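A sketch of how the matcher above is typically driven (the surrounding object holding self.criteria is assumed; names are illustrative):

# matcher.criteria = {'fstype': 'ext4', 'opts': 'rw'}
# matcher.match(('/dev/sda1', {'fstype': 'ext4', 'opts': 'rw', 'dump': 0}))  # True
# matcher.match(('/dev/sda1', {'fstype': 'xfs', 'opts': 'rw'}))              # False
# Every criteria key must be present in the entry dictionary with an equal value.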
def doprinc(data):
"""
Gets principal components from data in form of a list of [dec,inc] data.
Parameters
----------
data : nested list of dec, inc directions
Returns
-------
ppars : dictionary with the principal components
        dec : principal direction declination
inc : principal direction inclination
V2dec : intermediate eigenvector declination
V2inc : intermediate eigenvector inclination
V3dec : minor eigenvector declination
V3inc : minor eigenvector inclination
tau1 : major eigenvalue
tau2 : intermediate eigenvalue
tau3 : minor eigenvalue
N : number of points
Edir : elongation direction [dec, inc, length]
"""
ppars = {}
rad = old_div(np.pi, 180.)
X = dir2cart(data)
# for rec in data:
# dir=[]
# for c in rec: dir.append(c)
# cart= (dir2cart(dir))
# X.append(cart)
# put in T matrix
#
T = np.array(Tmatrix(X))
#
# get sorted evals/evects
#
t, V = tauV(T)
Pdir = cart2dir(V[0])
ppars['Edir'] = cart2dir(V[1]) # elongation direction
dec, inc = doflip(Pdir[0], Pdir[1])
ppars['dec'] = dec
ppars['inc'] = inc
ppars['N'] = len(data)
ppars['tau1'] = t[0]
ppars['tau2'] = t[1]
ppars['tau3'] = t[2]
Pdir = cart2dir(V[1])
dec, inc = doflip(Pdir[0], Pdir[1])
ppars['V2dec'] = dec
ppars['V2inc'] = inc
Pdir = cart2dir(V[2])
dec, inc = doflip(Pdir[0], Pdir[1])
ppars['V3dec'] = dec
ppars['V3inc'] = inc
return ppars | Gets principal components from data in form of a list of [dec,inc] data.
Parameters
----------
data : nested list of dec, inc directions
Returns
-------
ppars : dictionary with the principal components
        dec : principal direction declination
inc : principal direction inclination
V2dec : intermediate eigenvector declination
V2inc : intermediate eigenvector inclination
V3dec : minor eigenvector declination
V3inc : minor eigenvector inclination
tau1 : major eigenvalue
tau2 : intermediate eigenvalue
tau3 : minor eigenvalue
N : number of points
        Edir : elongation direction [dec, inc, length] | Below is the instruction that describes the task:
### Input:
Gets principal components from data in form of a list of [dec,inc] data.
Parameters
----------
data : nested list of dec, inc directions
Returns
-------
ppars : dictionary with the principal components
        dec : principal direction declination
inc : principal direction inclination
V2dec : intermediate eigenvector declination
V2inc : intermediate eigenvector inclination
V3dec : minor eigenvector declination
V3inc : minor eigenvector inclination
tau1 : major eigenvalue
tau2 : intermediate eigenvalue
tau3 : minor eigenvalue
N : number of points
Edir : elongation direction [dec, inc, length]
### Response:
def doprinc(data):
"""
Gets principal components from data in form of a list of [dec,inc] data.
Parameters
----------
data : nested list of dec, inc directions
Returns
-------
ppars : dictionary with the principal components
        dec : principal direction declination
inc : principal direction inclination
V2dec : intermediate eigenvector declination
V2inc : intermediate eigenvector inclination
V3dec : minor eigenvector declination
V3inc : minor eigenvector inclination
tau1 : major eigenvalue
tau2 : intermediate eigenvalue
tau3 : minor eigenvalue
N : number of points
Edir : elongation direction [dec, inc, length]
"""
ppars = {}
rad = old_div(np.pi, 180.)
X = dir2cart(data)
# for rec in data:
# dir=[]
# for c in rec: dir.append(c)
# cart= (dir2cart(dir))
# X.append(cart)
# put in T matrix
#
T = np.array(Tmatrix(X))
#
# get sorted evals/evects
#
t, V = tauV(T)
Pdir = cart2dir(V[0])
ppars['Edir'] = cart2dir(V[1]) # elongation direction
dec, inc = doflip(Pdir[0], Pdir[1])
ppars['dec'] = dec
ppars['inc'] = inc
ppars['N'] = len(data)
ppars['tau1'] = t[0]
ppars['tau2'] = t[1]
ppars['tau3'] = t[2]
Pdir = cart2dir(V[1])
dec, inc = doflip(Pdir[0], Pdir[1])
ppars['V2dec'] = dec
ppars['V2inc'] = inc
Pdir = cart2dir(V[2])
dec, inc = doflip(Pdir[0], Pdir[1])
ppars['V3dec'] = dec
ppars['V3inc'] = inc
return ppars |
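A usage sketch (directions in degrees; the exact numbers depend on the data, so none are claimed here):

directions = [[350.0, 10.0], [10.0, 20.0], [5.0, 15.0]]  # [declination, inclination] pairs
ppars = doprinc(directions)
principal = (ppars['dec'], ppars['inc'])              # principal eigenvector direction
taus = (ppars['tau1'], ppars['tau2'], ppars['tau3'])  # eigenvalues, largest first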
def start(self) -> None:
"""
Start the internal control loop. Potentially blocking, depending on
the value of `_run_control_loop` set by the initializer.
"""
self._setup()
if self._run_control_loop:
asyncio.set_event_loop(asyncio.new_event_loop())
self._heartbeat_reciever.start()
self._logger.info(' Start Loop')
return self.loop.start()
else:
self._logger.debug(' run_control_loop == False') | Start the internal control loop. Potentially blocking, depending on
the value of `_run_control_loop` set by the initializer. | Below is the instruction that describes the task:
### Input:
Start the internal control loop. Potentially blocking, depending on
the value of `_run_control_loop` set by the initializer.
### Response:
def start(self) -> None:
"""
Start the internal control loop. Potentially blocking, depending on
the value of `_run_control_loop` set by the initializer.
"""
self._setup()
if self._run_control_loop:
asyncio.set_event_loop(asyncio.new_event_loop())
self._heartbeat_reciever.start()
self._logger.info(' Start Loop')
return self.loop.start()
else:
self._logger.debug(' run_control_loop == False') |
def change_option_default(self, opt_name, default_val):
""" Change the default value of an option
:param opt_name: option name
:type opt_name: str
:param value: new default option value
"""
if not self.has_option(opt_name):
raise ValueError("Unknow option name (%s)" % opt_name)
self._options[opt_name].default = default_val | Change the default value of an option
:param opt_name: option name
:type opt_name: str
:param value: new default option value | Below is the instruction that describes the task:
### Input:
Change the default value of an option
:param opt_name: option name
:type opt_name: str
:param value: new default option value
### Response:
def change_option_default(self, opt_name, default_val):
""" Change the default value of an option
:param opt_name: option name
:type opt_name: str
:param value: new default option value
"""
if not self.has_option(opt_name):
raise ValueError("Unknow option name (%s)" % opt_name)
self._options[opt_name].default = default_val |
def parse_sv_frequencies(variant):
"""Parsing of some custom sv frequencies
These are very specific at the moment, this will hopefully get better over time when the
field of structural variants is more developed.
Args:
variant(cyvcf2.Variant)
Returns:
sv_frequencies(dict)
"""
frequency_keys = [
'clingen_cgh_benignAF',
'clingen_cgh_benign',
'clingen_cgh_pathogenicAF',
'clingen_cgh_pathogenic',
'clingen_ngi',
'clingen_ngiAF',
'swegen',
'swegenAF',
'decipherAF',
'decipher'
]
sv_frequencies = {}
for key in frequency_keys:
value = variant.INFO.get(key, 0)
if 'AF' in key:
value = float(value)
else:
value = int(value)
if value > 0:
sv_frequencies[key] = value
return sv_frequencies | Parsing of some custom sv frequencies
These are very specific at the moment, this will hopefully get better over time when the
field of structural variants is more developed.
Args:
variant(cyvcf2.Variant)
Returns:
    sv_frequencies(dict) | Below is the instruction that describes the task:
### Input:
Parsing of some custom sv frequencies
These are very specific at the moment, this will hopefully get better over time when the
field of structural variants is more developed.
Args:
variant(cyvcf2.Variant)
Returns:
sv_frequencies(dict)
### Response:
def parse_sv_frequencies(variant):
"""Parsing of some custom sv frequencies
These are very specific at the moment, this will hopefully get better over time when the
field of structural variants is more developed.
Args:
variant(cyvcf2.Variant)
Returns:
sv_frequencies(dict)
"""
frequency_keys = [
'clingen_cgh_benignAF',
'clingen_cgh_benign',
'clingen_cgh_pathogenicAF',
'clingen_cgh_pathogenic',
'clingen_ngi',
'clingen_ngiAF',
'swegen',
'swegenAF',
'decipherAF',
'decipher'
]
sv_frequencies = {}
for key in frequency_keys:
value = variant.INFO.get(key, 0)
if 'AF' in key:
value = float(value)
else:
value = int(value)
if value > 0:
sv_frequencies[key] = value
return sv_frequencies |
def read(self, size=None):
"""Read a length of bytes. Return empty on EOF. If 'size' is omitted,
return whole file.
"""
if size is not None:
return self.__sf.read(size)
block_size = self.__class__.__block_size
b = bytearray()
received_bytes = 0
while 1:
partial = self.__sf.read(block_size)
# self.__log.debug("Reading (%d) bytes. (%d) bytes returned." %
# (block_size, len(partial)))
b.extend(partial)
received_bytes += len(partial)
if len(partial) < block_size:
self.__log.debug("End of file.")
break
self.__log.debug("Read (%d) bytes for total-file." % (received_bytes))
return b | Read a length of bytes. Return empty on EOF. If 'size' is omitted,
        return whole file. | Below is the instruction that describes the task:
### Input:
Read a length of bytes. Return empty on EOF. If 'size' is omitted,
return whole file.
### Response:
def read(self, size=None):
"""Read a length of bytes. Return empty on EOF. If 'size' is omitted,
return whole file.
"""
if size is not None:
return self.__sf.read(size)
block_size = self.__class__.__block_size
b = bytearray()
received_bytes = 0
while 1:
partial = self.__sf.read(block_size)
# self.__log.debug("Reading (%d) bytes. (%d) bytes returned." %
# (block_size, len(partial)))
b.extend(partial)
received_bytes += len(partial)
if len(partial) < block_size:
self.__log.debug("End of file.")
break
self.__log.debug("Read (%d) bytes for total-file." % (received_bytes))
return b |
def profile(self):
"""Measure of bandedness, also known as 'envelope size'."""
leftmost_idx = np.argmax(self.matrix('dense').astype(bool), axis=0)
        return (np.arange(self.num_vertices()) - leftmost_idx).sum() | Measure of bandedness, also known as 'envelope size'. | Below is the instruction that describes the task:
### Input:
Measure of bandedness, also known as 'envelope size'.
### Response:
def profile(self):
"""Measure of bandedness, also known as 'envelope size'."""
leftmost_idx = np.argmax(self.matrix('dense').astype(bool), axis=0)
return (np.arange(self.num_vertices()) - leftmost_idx).sum() |
def transmit(self, bytes, protocol=None):
"""Transmit an apdu. Internally calls doTransmit() class method
and notify observers upon command/response APDU events.
Subclasses must override the doTransmit() class method.
@param bytes: list of bytes to transmit
@param protocol: the transmission protocol, from
CardConnection.T0_protocol,
CardConnection.T1_protocol, or
CardConnection.RAW_protocol
"""
Observable.setChanged(self)
Observable.notifyObservers(self,
CardConnectionEvent(
'command',
[bytes, protocol]))
data, sw1, sw2 = self.doTransmit(bytes, protocol)
Observable.setChanged(self)
Observable.notifyObservers(self,
CardConnectionEvent(
'response',
[data, sw1, sw2]))
if self.errorcheckingchain is not None:
self.errorcheckingchain[0](data, sw1, sw2)
return data, sw1, sw2 | Transmit an apdu. Internally calls doTransmit() class method
and notify observers upon command/response APDU events.
Subclasses must override the doTransmit() class method.
@param bytes: list of bytes to transmit
@param protocol: the transmission protocol, from
CardConnection.T0_protocol,
CardConnection.T1_protocol, or
                       CardConnection.RAW_protocol | Below is the instruction that describes the task:
### Input:
Transmit an apdu. Internally calls doTransmit() class method
and notify observers upon command/response APDU events.
Subclasses must override the doTransmit() class method.
@param bytes: list of bytes to transmit
@param protocol: the transmission protocol, from
CardConnection.T0_protocol,
CardConnection.T1_protocol, or
CardConnection.RAW_protocol
### Response:
def transmit(self, bytes, protocol=None):
"""Transmit an apdu. Internally calls doTransmit() class method
and notify observers upon command/response APDU events.
Subclasses must override the doTransmit() class method.
@param bytes: list of bytes to transmit
@param protocol: the transmission protocol, from
CardConnection.T0_protocol,
CardConnection.T1_protocol, or
CardConnection.RAW_protocol
"""
Observable.setChanged(self)
Observable.notifyObservers(self,
CardConnectionEvent(
'command',
[bytes, protocol]))
data, sw1, sw2 = self.doTransmit(bytes, protocol)
Observable.setChanged(self)
Observable.notifyObservers(self,
CardConnectionEvent(
'response',
[data, sw1, sw2]))
if self.errorcheckingchain is not None:
self.errorcheckingchain[0](data, sw1, sw2)
return data, sw1, sw2 |
def _dict_increment(self, dictionary, key):
"""Increments the value of the dictionary at the specified key."""
if key in dictionary:
dictionary[key] += 1
else:
            dictionary[key] = 1 | Increments the value of the dictionary at the specified key. | Below is the instruction that describes the task:
### Input:
Increments the value of the dictionary at the specified key.
### Response:
def _dict_increment(self, dictionary, key):
"""Increments the value of the dictionary at the specified key."""
if key in dictionary:
dictionary[key] += 1
else:
dictionary[key] = 1 |
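A small sketch of the counting pattern (shown as it would be called from the owning class; attribute names are illustrative):

# self._dict_increment(self.word_counts, 'error')
# self._dict_increment(self.word_counts, 'error')
# self.word_counts -> {'error': 2}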
def setup_image_plane_pixelization_grid_from_galaxies_and_grid_stack(galaxies, grid_stack):
"""An image-plane pixelization is one where its pixel centres are computed by tracing a sparse grid of pixels from \
the image's regular grid to other planes (e.g. the source-plane).
Provided a galaxy has an image-plane pixelization, this function returns a new *GridStack* instance where the \
    image-plane pixelization's sparse grid is added to it as an attribute.
    Thus, when the *GridStack* is passed to the *ray_tracing* module this sparse grid is also traced and the \
traced coordinates represent the centre of each pixelization pixel.
Parameters
-----------
galaxies : [model.galaxy.galaxy.Galaxy]
A list of galaxies, which may contain pixelizations and an *ImagePlanePixelization*.
grid_stacks : image.array.grid_stacks.GridStack
The collection of grid_stacks (regular, sub, etc.) which the image-plane pixelization grid (referred to as pix) \
may be added to.
"""
if not isinstance(grid_stack.regular, grids.PaddedRegularGrid):
for galaxy in galaxies:
if hasattr(galaxy, 'pixelization'):
if isinstance(galaxy.pixelization, ImagePlanePixelization):
image_plane_pix_grid = galaxy.pixelization.image_plane_pix_grid_from_regular_grid(
regular_grid=grid_stack.regular)
return grid_stack.new_grid_stack_with_pix_grid_added(pix_grid=image_plane_pix_grid.sparse_grid,
regular_to_nearest_pix=image_plane_pix_grid.regular_to_sparse)
return grid_stack | An image-plane pixelization is one where its pixel centres are computed by tracing a sparse grid of pixels from \
the image's regular grid to other planes (e.g. the source-plane).
Provided a galaxy has an image-plane pixelization, this function returns a new *GridStack* instance where the \
image-plane pixelization's sparse grid is added to it as an attribute.
Thus, when the *GridStack* is passed to the *ray_tracing* module this sparse grid is also traced and the \
traced coordinates represent the centre of each pixelization pixel.
Parameters
-----------
galaxies : [model.galaxy.galaxy.Galaxy]
A list of galaxies, which may contain pixelizations and an *ImagePlanePixelization*.
grid_stacks : image.array.grid_stacks.GridStack
The collection of grid_stacks (regular, sub, etc.) which the image-plane pixelization grid (referred to as pix) \
may be added to. | Below is the instruction that describes the task:
### Input:
An image-plane pixelization is one where its pixel centres are computed by tracing a sparse grid of pixels from \
the image's regular grid to other planes (e.g. the source-plane).
Provided a galaxy has an image-plane pixelization, this function returns a new *GridStack* instance where the \
image-plane pixelization's sparse grid is added to it as an attribute.
Thus, when the *GridStack* is passed to the *ray_tracing* module this sparse grid is also traced and the \
traced coordinates represent the centre of each pixelization pixel.
Parameters
-----------
galaxies : [model.galaxy.galaxy.Galaxy]
A list of galaxies, which may contain pixelizations and an *ImagePlanePixelization*.
grid_stacks : image.array.grid_stacks.GridStack
The collection of grid_stacks (regular, sub, etc.) which the image-plane pixelization grid (referred to as pix) \
may be added to.
### Response:
def setup_image_plane_pixelization_grid_from_galaxies_and_grid_stack(galaxies, grid_stack):
"""An image-plane pixelization is one where its pixel centres are computed by tracing a sparse grid of pixels from \
the image's regular grid to other planes (e.g. the source-plane).
Provided a galaxy has an image-plane pixelization, this function returns a new *GridStack* instance where the \
    image-plane pixelization's sparse grid is added to it as an attribute.
    Thus, when the *GridStack* is passed to the *ray_tracing* module this sparse grid is also traced and the \
traced coordinates represent the centre of each pixelization pixel.
Parameters
-----------
galaxies : [model.galaxy.galaxy.Galaxy]
A list of galaxies, which may contain pixelizations and an *ImagePlanePixelization*.
grid_stacks : image.array.grid_stacks.GridStack
The collection of grid_stacks (regular, sub, etc.) which the image-plane pixelization grid (referred to as pix) \
may be added to.
"""
if not isinstance(grid_stack.regular, grids.PaddedRegularGrid):
for galaxy in galaxies:
if hasattr(galaxy, 'pixelization'):
if isinstance(galaxy.pixelization, ImagePlanePixelization):
image_plane_pix_grid = galaxy.pixelization.image_plane_pix_grid_from_regular_grid(
regular_grid=grid_stack.regular)
return grid_stack.new_grid_stack_with_pix_grid_added(pix_grid=image_plane_pix_grid.sparse_grid,
regular_to_nearest_pix=image_plane_pix_grid.regular_to_sparse)
return grid_stack |
def sbar(Ss):
"""
calculate average s,sigma from list of "s"s.
"""
if type(Ss) == list:
Ss = np.array(Ss)
npts = Ss.shape[0]
Ss = Ss.transpose()
avd, avs = [], []
# D=np.array([Ss[0],Ss[1],Ss[2],Ss[3]+0.5*(Ss[0]+Ss[1]),Ss[4]+0.5*(Ss[1]+Ss[2]),Ss[5]+0.5*(Ss[0]+Ss[2])]).transpose()
D = np.array([Ss[0], Ss[1], Ss[2], Ss[3] + 0.5 * (Ss[0] + Ss[1]),
Ss[4] + 0.5 * (Ss[1] + Ss[2]), Ss[5] + 0.5 * (Ss[0] + Ss[2])])
for j in range(6):
avd.append(np.average(D[j]))
avs.append(np.average(Ss[j]))
D = D.transpose()
# for s in Ss:
# print 'from sbar: ',s
# D.append(s[:]) # append a copy of s
# D[-1][3]=D[-1][3]+0.5*(s[0]+s[1])
# D[-1][4]=D[-1][4]+0.5*(s[1]+s[2])
# D[-1][5]=D[-1][5]+0.5*(s[0]+s[2])
# for j in range(6):
# avd[j]+=(D[-1][j])/float(npts)
# avs[j]+=(s[j])/float(npts)
# calculate sigma
nf = (npts - 1) * 6 # number of degrees of freedom
s0 = 0
Dels = (D - avd)**2
s0 = np.sum(Dels)
sigma = np.sqrt(s0/float(nf))
    return nf, sigma, avs | calculate average s,sigma from list of "s"s. | Below is the instruction that describes the task:
### Input:
calculate average s,sigma from list of "s"s.
### Response:
def sbar(Ss):
"""
calculate average s,sigma from list of "s"s.
"""
if type(Ss) == list:
Ss = np.array(Ss)
npts = Ss.shape[0]
Ss = Ss.transpose()
avd, avs = [], []
# D=np.array([Ss[0],Ss[1],Ss[2],Ss[3]+0.5*(Ss[0]+Ss[1]),Ss[4]+0.5*(Ss[1]+Ss[2]),Ss[5]+0.5*(Ss[0]+Ss[2])]).transpose()
D = np.array([Ss[0], Ss[1], Ss[2], Ss[3] + 0.5 * (Ss[0] + Ss[1]),
Ss[4] + 0.5 * (Ss[1] + Ss[2]), Ss[5] + 0.5 * (Ss[0] + Ss[2])])
for j in range(6):
avd.append(np.average(D[j]))
avs.append(np.average(Ss[j]))
D = D.transpose()
# for s in Ss:
# print 'from sbar: ',s
# D.append(s[:]) # append a copy of s
# D[-1][3]=D[-1][3]+0.5*(s[0]+s[1])
# D[-1][4]=D[-1][4]+0.5*(s[1]+s[2])
# D[-1][5]=D[-1][5]+0.5*(s[0]+s[2])
# for j in range(6):
# avd[j]+=(D[-1][j])/float(npts)
# avs[j]+=(s[j])/float(npts)
# calculate sigma
nf = (npts - 1) * 6 # number of degrees of freedom
s0 = 0
Dels = (D - avd)**2
s0 = np.sum(Dels)
sigma = np.sqrt(s0/float(nf))
return nf, sigma, avs |
def next_string(min_size, max_size):
"""
Generates a random string, consisting of upper and lower case letters (of the English alphabet),
digits (0-9), and symbols ("_,.:-/.[].{},#-!,$=%.+^.&*-() ").
:param min_size: (optional) minimum string length.
:param max_size: maximum string length.
:return: a random string.
"""
result = ''
    max_size = max_size if max_size is not None else min_size
length = RandomInteger.next_integer(min_size, max_size)
for i in range(length):
result += random.choice(_chars)
return result | Generates a random string, consisting of upper and lower case letters (of the English alphabet),
digits (0-9), and symbols ("_,.:-/.[].{},#-!,$=%.+^.&*-() ").
:param min_size: (optional) minimum string length.
:param max_size: maximum string length.
:return: a random string. | Below is the instruction that describes the task:
### Input:
Generates a random string, consisting of upper and lower case letters (of the English alphabet),
digits (0-9), and symbols ("_,.:-/.[].{},#-!,$=%.+^.&*-() ").
:param min_size: (optional) minimum string length.
:param max_size: maximum string length.
:return: a random string.
### Response:
def next_string(min_size, max_size):
"""
Generates a random string, consisting of upper and lower case letters (of the English alphabet),
digits (0-9), and symbols ("_,.:-/.[].{},#-!,$=%.+^.&*-() ").
:param min_size: (optional) minimum string length.
:param max_size: maximum string length.
:return: a random string.
"""
result = ''
    max_size = max_size if max_size is not None else min_size
length = RandomInteger.next_integer(min_size, max_size)
for i in range(length):
result += random.choice(_chars)
return result |
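Usage sketch (output is random by design; only the length bounds are meaningful):

token = next_string(8, 12)  # may also be exposed as RandomString.next_string in the library
assert 8 <= len(token) <= 12  # exact inclusivity of the upper bound depends on RandomInteger.next_integer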
def headloss_fric(FlowRate, Diam, Length, Nu, PipeRough):
"""Return the major head loss (due to wall shear) in a pipe.
This equation applies to both laminar and turbulent flows.
"""
#Checking input validity - inputs not checked here are checked by
#functions this function calls.
ut.check_range([Length, ">0", "Length"])
return (fric(FlowRate, Diam, Nu, PipeRough)
* 8 / (gravity.magnitude * np.pi**2)
* (Length * FlowRate**2) / Diam**5
) | Return the major head loss (due to wall shear) in a pipe.
    This equation applies to both laminar and turbulent flows. | Below is the instruction that describes the task:
### Input:
Return the major head loss (due to wall shear) in a pipe.
This equation applies to both laminar and turbulent flows.
### Response:
def headloss_fric(FlowRate, Diam, Length, Nu, PipeRough):
"""Return the major head loss (due to wall shear) in a pipe.
This equation applies to both laminar and turbulent flows.
"""
#Checking input validity - inputs not checked here are checked by
#functions this function calls.
ut.check_range([Length, ">0", "Length"])
return (fric(FlowRate, Diam, Nu, PipeRough)
* 8 / (gravity.magnitude * np.pi**2)
* (Length * FlowRate**2) / Diam**5
) |
def ticket_satisfaction_rating_create(self, ticket_id, data, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/satisfaction_ratings#create-a-satisfaction-rating"
api_path = "/api/v2/tickets/{ticket_id}/satisfaction_rating.json"
api_path = api_path.format(ticket_id=ticket_id)
    return self.call(api_path, method="POST", data=data, **kwargs) | https://developer.zendesk.com/rest_api/docs/core/satisfaction_ratings#create-a-satisfaction-rating | Below is the instruction that describes the task:
### Input:
https://developer.zendesk.com/rest_api/docs/core/satisfaction_ratings#create-a-satisfaction-rating
### Response:
def ticket_satisfaction_rating_create(self, ticket_id, data, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/satisfaction_ratings#create-a-satisfaction-rating"
api_path = "/api/v2/tickets/{ticket_id}/satisfaction_rating.json"
api_path = api_path.format(ticket_id=ticket_id)
return self.call(api_path, method="POST", data=data, **kwargs) |
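A hedged call sketch (the 'client' name is illustrative for whatever object exposes this wrapper; the nested payload shape follows the Zendesk endpoint linked above, with field names shown for illustration):

client.ticket_satisfaction_rating_create(
    12345,
    {"satisfaction_rating": {"score": "good", "comment": "Quick and helpful."}},
)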
def get_manifest_digests(image, registry, insecure=False, dockercfg_path=None,
versions=('v1', 'v2', 'v2_list', 'oci', 'oci_index'), require_digest=True):
"""Return manifest digest for image.
:param image: ImageName, the remote image to inspect
:param registry: str, URI for registry, if URI schema is not provided,
https:// will be used
:param insecure: bool, when True registry's cert is not verified
:param dockercfg_path: str, dirname of .dockercfg location
:param versions: tuple, which manifest schema versions to fetch digest
:param require_digest: bool, when True exception is thrown if no digest is
set in the headers.
:return: dict, versions mapped to their digest
"""
registry_session = RegistrySession(registry, insecure=insecure, dockercfg_path=dockercfg_path)
digests = {}
# If all of the media types return a 404 NOT_FOUND status, then we rethrow
# an exception, if all of the media types fail for some other reason - like
# bad headers - then we return a ManifestDigest object with no digests.
# This is interesting for the Pulp "retry until the manifest shows up" case.
all_not_found = True
saved_not_found = None
for version in versions:
media_type = get_manifest_media_type(version)
response, saved_not_found = get_manifest(image, registry_session, version)
if saved_not_found is None:
all_not_found = False
if not response:
continue
# set it to truthy value so that koji_import would know pulp supports these digests
digests[version] = True
if not response.headers.get('Docker-Content-Digest'):
logger.warning('Unable to fetch digest for %s, no Docker-Content-Digest header',
media_type)
continue
digests[version] = response.headers['Docker-Content-Digest']
context = '/'.join([x for x in [image.namespace, image.repo] if x])
tag = image.tag
logger.debug('Image %s:%s has %s manifest digest: %s',
context, tag, version, digests[version])
if not digests:
if all_not_found and len(versions) > 0:
raise saved_not_found
if require_digest:
raise RuntimeError('No digests found for {}'.format(image))
return ManifestDigest(**digests) | Return manifest digest for image.
:param image: ImageName, the remote image to inspect
:param registry: str, URI for registry, if URI schema is not provided,
https:// will be used
:param insecure: bool, when True registry's cert is not verified
:param dockercfg_path: str, dirname of .dockercfg location
:param versions: tuple, which manifest schema versions to fetch digest
:param require_digest: bool, when True exception is thrown if no digest is
set in the headers.
    :return: dict, versions mapped to their digest | Below is the instruction that describes the task:
### Input:
Return manifest digest for image.
:param image: ImageName, the remote image to inspect
:param registry: str, URI for registry, if URI schema is not provided,
https:// will be used
:param insecure: bool, when True registry's cert is not verified
:param dockercfg_path: str, dirname of .dockercfg location
:param versions: tuple, which manifest schema versions to fetch digest
:param require_digest: bool, when True exception is thrown if no digest is
set in the headers.
:return: dict, versions mapped to their digest
### Response:
def get_manifest_digests(image, registry, insecure=False, dockercfg_path=None,
versions=('v1', 'v2', 'v2_list', 'oci', 'oci_index'), require_digest=True):
"""Return manifest digest for image.
:param image: ImageName, the remote image to inspect
:param registry: str, URI for registry, if URI schema is not provided,
https:// will be used
:param insecure: bool, when True registry's cert is not verified
:param dockercfg_path: str, dirname of .dockercfg location
:param versions: tuple, which manifest schema versions to fetch digest
:param require_digest: bool, when True exception is thrown if no digest is
set in the headers.
:return: dict, versions mapped to their digest
"""
registry_session = RegistrySession(registry, insecure=insecure, dockercfg_path=dockercfg_path)
digests = {}
# If all of the media types return a 404 NOT_FOUND status, then we rethrow
# an exception, if all of the media types fail for some other reason - like
# bad headers - then we return a ManifestDigest object with no digests.
# This is interesting for the Pulp "retry until the manifest shows up" case.
all_not_found = True
saved_not_found = None
for version in versions:
media_type = get_manifest_media_type(version)
response, saved_not_found = get_manifest(image, registry_session, version)
if saved_not_found is None:
all_not_found = False
if not response:
continue
# set it to truthy value so that koji_import would know pulp supports these digests
digests[version] = True
if not response.headers.get('Docker-Content-Digest'):
logger.warning('Unable to fetch digest for %s, no Docker-Content-Digest header',
media_type)
continue
digests[version] = response.headers['Docker-Content-Digest']
context = '/'.join([x for x in [image.namespace, image.repo] if x])
tag = image.tag
logger.debug('Image %s:%s has %s manifest digest: %s',
context, tag, version, digests[version])
if not digests:
if all_not_found and len(versions) > 0:
raise saved_not_found
if require_digest:
raise RuntimeError('No digests found for {}'.format(image))
return ManifestDigest(**digests) |
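A minimal usage sketch for the digest lookup above (not from the original source): the registry URL and image coordinates are placeholders, the ImageName constructor signature is assumed, and attribute-style access on the returned ManifestDigest is assumed.
image = ImageName(namespace='library', repo='busybox', tag='latest')  # assumed constructor
digests = get_manifest_digests(image, 'https://registry.example.com',
                               versions=('v2',), require_digest=False)
if digests.v2:  # assumes ManifestDigest exposes each version as an attribute
    print('schema 2 digest:', digests.v2)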
def post(method, hmc, uri, uri_parms, body, logon_required,
wait_for_completion):
"""Operation: Update <resource> Properties."""
assert wait_for_completion is True # async not supported yet
try:
resource = hmc.lookup_by_uri(uri)
except KeyError:
raise InvalidResourceError(method, uri)
resource.update(body) | Operation: Update <resource> Properties. | Below is the the instruction that describes the task:
### Input:
Operation: Update <resource> Properties.
### Response:
def post(method, hmc, uri, uri_parms, body, logon_required,
wait_for_completion):
"""Operation: Update <resource> Properties."""
assert wait_for_completion is True # async not supported yet
try:
resource = hmc.lookup_by_uri(uri)
except KeyError:
raise InvalidResourceError(method, uri)
resource.update(body) |
def conv2d_fixed_padding(inputs,
filters,
kernel_size,
strides,
data_format="channels_first",
use_td=False,
targeting_rate=None,
keep_prob=None,
is_training=None):
"""Strided 2-D convolution with explicit padding.
The padding is consistent and is based only on `kernel_size`, not on the
dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone).
Args:
inputs: `Tensor` of size `[batch, channels, height_in, width_in]`.
filters: `int` number of filters in the convolution.
kernel_size: `int` size of the kernel to be used in the convolution.
strides: `int` strides of the convolution.
data_format: `str` either "channels_first" for `[batch, channels, height,
width]` or "channels_last for `[batch, height, width, channels]`.
use_td: `str` one of "weight" or "unit". Set to False or "" to disable
targeted dropout.
targeting_rate: `float` proportion of weights to target with targeted
dropout.
keep_prob: `float` keep probability for targeted dropout.
is_training: `bool` for whether the model is in training.
Returns:
A `Tensor` of shape `[batch, filters, height_out, width_out]`.
Raises:
Exception: if use_td is not valid.
"""
if strides > 1:
inputs = fixed_padding(inputs, kernel_size, data_format=data_format)
if use_td:
inputs_shape = common_layers.shape_list(inputs)
if use_td == "weight":
if data_format == "channels_last":
size = kernel_size * kernel_size * inputs_shape[-1]
else:
size = kernel_size * kernel_size * inputs_shape[1]
targeting_count = targeting_rate * tf.to_float(size)
targeting_fn = common_layers.weight_targeting
elif use_td == "unit":
targeting_count = targeting_rate * filters
targeting_fn = common_layers.unit_targeting
else:
raise Exception("Unrecognized targeted dropout type: %s" % use_td)
y = common_layers.td_conv(
inputs,
filters,
kernel_size,
targeting_count,
targeting_fn,
keep_prob,
is_training,
do_prune=True,
strides=strides,
padding=("SAME" if strides == 1 else "VALID"),
data_format=data_format,
use_bias=False,
kernel_initializer=tf.variance_scaling_initializer())
else:
y = layers().Conv2D(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=("SAME" if strides == 1 else "VALID"),
use_bias=False,
kernel_initializer=tf.variance_scaling_initializer(),
data_format=data_format)(inputs)
return y | Strided 2-D convolution with explicit padding.
The padding is consistent and is based only on `kernel_size`, not on the
dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone).
Args:
inputs: `Tensor` of size `[batch, channels, height_in, width_in]`.
filters: `int` number of filters in the convolution.
kernel_size: `int` size of the kernel to be used in the convolution.
strides: `int` strides of the convolution.
data_format: `str` either "channels_first" for `[batch, channels, height,
width]` or "channels_last for `[batch, height, width, channels]`.
use_td: `str` one of "weight" or "unit". Set to False or "" to disable
targeted dropout.
targeting_rate: `float` proportion of weights to target with targeted
dropout.
keep_prob: `float` keep probability for targeted dropout.
is_training: `bool` for whether the model is in training.
Returns:
A `Tensor` of shape `[batch, filters, height_out, width_out]`.
Raises:
Exception: if use_td is not valid. | Below is the the instruction that describes the task:
### Input:
Strided 2-D convolution with explicit padding.
The padding is consistent and is based only on `kernel_size`, not on the
dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone).
Args:
inputs: `Tensor` of size `[batch, channels, height_in, width_in]`.
filters: `int` number of filters in the convolution.
kernel_size: `int` size of the kernel to be used in the convolution.
strides: `int` strides of the convolution.
data_format: `str` either "channels_first" for `[batch, channels, height,
width]` or "channels_last for `[batch, height, width, channels]`.
use_td: `str` one of "weight" or "unit". Set to False or "" to disable
targeted dropout.
targeting_rate: `float` proportion of weights to target with targeted
dropout.
keep_prob: `float` keep probability for targeted dropout.
is_training: `bool` for whether the model is in training.
Returns:
A `Tensor` of shape `[batch, filters, height_out, width_out]`.
Raises:
Exception: if use_td is not valid.
### Response:
def conv2d_fixed_padding(inputs,
filters,
kernel_size,
strides,
data_format="channels_first",
use_td=False,
targeting_rate=None,
keep_prob=None,
is_training=None):
"""Strided 2-D convolution with explicit padding.
The padding is consistent and is based only on `kernel_size`, not on the
dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone).
Args:
inputs: `Tensor` of size `[batch, channels, height_in, width_in]`.
filters: `int` number of filters in the convolution.
kernel_size: `int` size of the kernel to be used in the convolution.
strides: `int` strides of the convolution.
data_format: `str` either "channels_first" for `[batch, channels, height,
width]` or "channels_last for `[batch, height, width, channels]`.
use_td: `str` one of "weight" or "unit". Set to False or "" to disable
targeted dropout.
targeting_rate: `float` proportion of weights to target with targeted
dropout.
keep_prob: `float` keep probability for targeted dropout.
is_training: `bool` for whether the model is in training.
Returns:
A `Tensor` of shape `[batch, filters, height_out, width_out]`.
Raises:
Exception: if use_td is not valid.
"""
if strides > 1:
inputs = fixed_padding(inputs, kernel_size, data_format=data_format)
if use_td:
inputs_shape = common_layers.shape_list(inputs)
if use_td == "weight":
if data_format == "channels_last":
size = kernel_size * kernel_size * inputs_shape[-1]
else:
size = kernel_size * kernel_size * inputs_shape[1]
targeting_count = targeting_rate * tf.to_float(size)
targeting_fn = common_layers.weight_targeting
elif use_td == "unit":
targeting_count = targeting_rate * filters
targeting_fn = common_layers.unit_targeting
else:
raise Exception("Unrecognized targeted dropout type: %s" % use_td)
y = common_layers.td_conv(
inputs,
filters,
kernel_size,
targeting_count,
targeting_fn,
keep_prob,
is_training,
do_prune=True,
strides=strides,
padding=("SAME" if strides == 1 else "VALID"),
data_format=data_format,
use_bias=False,
kernel_initializer=tf.variance_scaling_initializer())
else:
y = layers().Conv2D(
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=("SAME" if strides == 1 else "VALID"),
use_bias=False,
kernel_initializer=tf.variance_scaling_initializer(),
data_format=data_format)(inputs)
return y |
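An illustrative call for the convolution helper above (a sketch assuming the TF1-style graph semantics of the surrounding module; the shapes are examples).
import tensorflow as tf

inputs = tf.zeros([8, 3, 32, 32])  # NCHW: batch of 8, 3 channels, 32x32
out = conv2d_fixed_padding(inputs, filters=64, kernel_size=3, strides=2,
                           data_format="channels_first")
# strides=2 triggers fixed_padding, so the spatial size halves exactly:
# out has static shape [8, 64, 16, 16].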
def _parse_part(client, command, actor, args):
"""Parse a PART and update channel states, then dispatch events.
Note that two events are dispatched here:
- PART, because a user parted the channel
- MEMBERS, because the channel's members changed
"""
actor = User(actor)
channel, _, message = args.partition(' :')
channel = client.server.get_channel(channel)
channel.remove_user(actor)
if actor.nick == client.user.nick:
client.server.remove_channel(channel)
client.dispatch_event("PART", actor, channel, message)
if actor.nick != client.user.nick:
client.dispatch_event("MEMBERS", channel) | Parse a PART and update channel states, then dispatch events.
Note that two events are dispatched here:
- PART, because a user parted the channel
- MEMBERS, because the channel's members changed | Below is the the instruction that describes the task:
### Input:
Parse a PART and update channel states, then dispatch events.
Note that two events are dispatched here:
- PART, because a user parted the channel
- MEMBERS, because the channel's members changed
### Response:
def _parse_part(client, command, actor, args):
"""Parse a PART and update channel states, then dispatch events.
Note that two events are dispatched here:
- PART, because a user parted the channel
- MEMBERS, because the channel's members changed
"""
actor = User(actor)
channel, _, message = args.partition(' :')
channel = client.server.get_channel(channel)
channel.remove_user(actor)
if actor.nick == client.user.nick:
client.server.remove_channel(channel)
client.dispatch_event("PART", actor, channel, message)
if actor.nick != client.user.nick:
client.dispatch_event("MEMBERS", channel) |
def next(self):
"""Returns the next batch of data."""
if self.curr_idx == len(self.idx):
raise StopIteration
i, j = self.idx[self.curr_idx]
self.curr_idx += 1
audio_paths = []
texts = []
for duration, audio_path, text in self.data[i][j:j+self.batch_size]:
audio_paths.append(audio_path)
texts.append(text)
if self.is_first_epoch:
data_set = self.datagen.prepare_minibatch(audio_paths, texts, overwrite=True,
is_bi_graphemes=self.is_bi_graphemes,
seq_length=self.buckets[i],
save_feature_as_csvfile=self.save_feature_as_csvfile)
else:
data_set = self.datagen.prepare_minibatch(audio_paths, texts, overwrite=False,
is_bi_graphemes=self.is_bi_graphemes,
seq_length=self.buckets[i],
save_feature_as_csvfile=self.save_feature_as_csvfile)
data_all = [mx.nd.array(data_set['x'])] + self.init_state_arrays
label_all = [mx.nd.array(data_set['y'])]
self.label = label_all
provide_data = [('data', (self.batch_size, self.buckets[i], self.width * self.height))] + self.init_states
return mx.io.DataBatch(data_all, label_all, pad=0,
bucket_key=self.buckets[i],
provide_data=provide_data,
provide_label=self.provide_label) | Returns the next batch of data. | Below is the the instruction that describes the task:
### Input:
Returns the next batch of data.
### Response:
def next(self):
"""Returns the next batch of data."""
if self.curr_idx == len(self.idx):
raise StopIteration
i, j = self.idx[self.curr_idx]
self.curr_idx += 1
audio_paths = []
texts = []
for duration, audio_path, text in self.data[i][j:j+self.batch_size]:
audio_paths.append(audio_path)
texts.append(text)
if self.is_first_epoch:
data_set = self.datagen.prepare_minibatch(audio_paths, texts, overwrite=True,
is_bi_graphemes=self.is_bi_graphemes,
seq_length=self.buckets[i],
save_feature_as_csvfile=self.save_feature_as_csvfile)
else:
data_set = self.datagen.prepare_minibatch(audio_paths, texts, overwrite=False,
is_bi_graphemes=self.is_bi_graphemes,
seq_length=self.buckets[i],
save_feature_as_csvfile=self.save_feature_as_csvfile)
data_all = [mx.nd.array(data_set['x'])] + self.init_state_arrays
label_all = [mx.nd.array(data_set['y'])]
self.label = label_all
provide_data = [('data', (self.batch_size, self.buckets[i], self.width * self.height))] + self.init_states
return mx.io.DataBatch(data_all, label_all, pad=0,
bucket_key=self.buckets[i],
provide_data=provide_data,
provide_label=self.provide_label) |
def process_needlist(app, doctree, fromdocname):
"""
Replace all needlist nodes with a list of the collected needs.
Augment each need with a backlink to the original location.
"""
env = app.builder.env
for node in doctree.traverse(Needlist):
if not app.config.needs_include_needs:
# Ok, this is really dirty.
            # If we replace a node, docutils checks that it will not lose any attributes.
            # That is the case here, because we are using the node's "ids" attribute.
            # It is unclear why losing an attribute is such a big deal, so we delete everything
            # before docutils complains about it.
for att in ('ids', 'names', 'classes', 'dupnames'):
node[att] = []
node.replace_self([])
continue
id = node.attributes["ids"][0]
current_needfilter = env.need_all_needlists[id]
all_needs = env.needs_all_needs
content = []
all_needs = list(all_needs.values())
if current_needfilter["sort_by"] is not None:
if current_needfilter["sort_by"] == "id":
all_needs = sorted(all_needs, key=lambda node: node["id"])
elif current_needfilter["sort_by"] == "status":
all_needs = sorted(all_needs, key=status_sorter)
found_needs = procces_filters(all_needs, current_needfilter)
line_block = nodes.line_block()
for need_info in found_needs:
para = nodes.line()
description = "%s: %s" % (need_info["id"], need_info["title"])
if current_needfilter["show_status"] and need_info["status"] is not None:
description += " (%s)" % need_info["status"]
if current_needfilter["show_tags"] and need_info["tags"] is not None:
description += " [%s]" % "; ".join(need_info["tags"])
title = nodes.Text(description, description)
# Create a reference
if not need_info["hide"]:
ref = nodes.reference('', '')
ref['refdocname'] = need_info['docname']
ref['refuri'] = app.builder.get_relative_uri(
fromdocname, need_info['docname'])
ref['refuri'] += '#' + need_info['target_node']['refid']
ref.append(title)
para += ref
else:
para += title
line_block.append(para)
content.append(line_block)
if len(content) == 0:
content.append(no_needs_found_paragraph())
if current_needfilter["show_filters"]:
content.append(used_filter_paragraph(current_needfilter))
node.replace_self(content) | Replace all needlist nodes with a list of the collected needs.
Augment each need with a backlink to the original location. | Below is the the instruction that describes the task:
### Input:
Replace all needlist nodes with a list of the collected needs.
Augment each need with a backlink to the original location.
### Response:
def process_needlist(app, doctree, fromdocname):
"""
Replace all needlist nodes with a list of the collected needs.
Augment each need with a backlink to the original location.
"""
env = app.builder.env
for node in doctree.traverse(Needlist):
if not app.config.needs_include_needs:
# Ok, this is really dirty.
            # If we replace a node, docutils checks that it will not lose any attributes.
            # That is the case here, because we are using the node's "ids" attribute.
            # It is unclear why losing an attribute is such a big deal, so we delete everything
            # before docutils complains about it.
for att in ('ids', 'names', 'classes', 'dupnames'):
node[att] = []
node.replace_self([])
continue
id = node.attributes["ids"][0]
current_needfilter = env.need_all_needlists[id]
all_needs = env.needs_all_needs
content = []
all_needs = list(all_needs.values())
if current_needfilter["sort_by"] is not None:
if current_needfilter["sort_by"] == "id":
all_needs = sorted(all_needs, key=lambda node: node["id"])
elif current_needfilter["sort_by"] == "status":
all_needs = sorted(all_needs, key=status_sorter)
found_needs = procces_filters(all_needs, current_needfilter)
line_block = nodes.line_block()
for need_info in found_needs:
para = nodes.line()
description = "%s: %s" % (need_info["id"], need_info["title"])
if current_needfilter["show_status"] and need_info["status"] is not None:
description += " (%s)" % need_info["status"]
if current_needfilter["show_tags"] and need_info["tags"] is not None:
description += " [%s]" % "; ".join(need_info["tags"])
title = nodes.Text(description, description)
# Create a reference
if not need_info["hide"]:
ref = nodes.reference('', '')
ref['refdocname'] = need_info['docname']
ref['refuri'] = app.builder.get_relative_uri(
fromdocname, need_info['docname'])
ref['refuri'] += '#' + need_info['target_node']['refid']
ref.append(title)
para += ref
else:
para += title
line_block.append(para)
content.append(line_block)
if len(content) == 0:
content.append(no_needs_found_paragraph())
if current_needfilter["show_filters"]:
content.append(used_filter_paragraph(current_needfilter))
node.replace_self(content) |
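For context, a handler with this (app, doctree, fromdocname) signature is typically registered on Sphinx's doctree-resolved event; the setup() below is an illustrative sketch, not code from the source.
def setup(app):
    app.connect('doctree-resolved', process_needlist)
    return {'version': '0.1', 'parallel_read_safe': True}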
def get_movielens_iter(filename, batch_size):
"""Not particularly fast code to parse the text file and load into NDArrays.
    return a single prefetching data iter for training.
"""
logging.info("Preparing data iterators for " + filename + " ... ")
user = []
item = []
score = []
with open(filename, 'r') as f:
num_samples = 0
for line in f:
tks = line.strip().split('::')
if len(tks) != 4:
continue
num_samples += 1
user.append((tks[0]))
item.append((tks[1]))
score.append((tks[2]))
# convert to ndarrays
user = mx.nd.array(user, dtype='int32')
item = mx.nd.array(item)
score = mx.nd.array(score)
# prepare data iters
data_train = {'user': user, 'item': item}
label_train = {'score': score}
iter_train = mx.io.NDArrayIter(data=data_train,label=label_train,
batch_size=batch_size, shuffle=True)
return mx.io.PrefetchingIter(iter_train) | Not particularly fast code to parse the text file and load into NDArrays.
    return a single prefetching data iter for training. | Below is the the instruction that describes the task:
### Input:
Not particularly fast code to parse the text file and load into NDArrays.
    return a single prefetching data iter for training.
### Response:
def get_movielens_iter(filename, batch_size):
"""Not particularly fast code to parse the text file and load into NDArrays.
    return a single prefetching data iter for training.
"""
logging.info("Preparing data iterators for " + filename + " ... ")
user = []
item = []
score = []
with open(filename, 'r') as f:
num_samples = 0
for line in f:
tks = line.strip().split('::')
if len(tks) != 4:
continue
num_samples += 1
user.append((tks[0]))
item.append((tks[1]))
score.append((tks[2]))
# convert to ndarrays
user = mx.nd.array(user, dtype='int32')
item = mx.nd.array(item)
score = mx.nd.array(score)
# prepare data iters
data_train = {'user': user, 'item': item}
label_train = {'score': score}
iter_train = mx.io.NDArrayIter(data=data_train,label=label_train,
batch_size=batch_size, shuffle=True)
return mx.io.PrefetchingIter(iter_train) |
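A small usage sketch (the ratings path is a placeholder; the file is expected to contain user::item::score::timestamp lines as in the MovieLens dumps).
train_iter = get_movielens_iter('./ml-1m/ratings.dat', batch_size=256)
for batch in train_iter:
    print([d.shape for d in batch.data], batch.label[0].shape)
    break  # just peek at the first prefetched batch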
def _speak_normal_inherit(self, element):
"""
Speak the content of element and descendants.
:param element: The element.
:type element: hatemile.util.html.htmldomelement.HTMLDOMElement
"""
self._visit(element, self._speak_normal)
element.normalize() | Speak the content of element and descendants.
:param element: The element.
:type element: hatemile.util.html.htmldomelement.HTMLDOMElement | Below is the the instruction that describes the task:
### Input:
Speak the content of element and descendants.
:param element: The element.
:type element: hatemile.util.html.htmldomelement.HTMLDOMElement
### Response:
def _speak_normal_inherit(self, element):
"""
Speak the content of element and descendants.
:param element: The element.
:type element: hatemile.util.html.htmldomelement.HTMLDOMElement
"""
self._visit(element, self._speak_normal)
element.normalize() |
def init(self, formula, incr=False):
"""
Initialize the internal SAT oracle. The oracle is used
incrementally and so it is initialized only once when
constructing an object of class :class:`RC2`. Given an
input :class:`.WCNF` formula, the method bootstraps the
oracle with its hard clauses. It also augments the soft
clauses with "fresh" selectors and adds them to the oracle
afterwards.
Optional input parameter ``incr`` (``False`` by default)
regulates whether or not Glucose's incremental mode [6]_
is turned on.
:param formula: input formula
:param incr: apply incremental mode of Glucose
:type formula: :class:`.WCNF`
:type incr: bool
"""
# creating a solver object
self.oracle = Solver(name=self.solver, bootstrap_with=formula.hard,
incr=incr, use_timer=True)
# adding soft clauses to oracle
for i, cl in enumerate(formula.soft):
selv = cl[0] # if clause is unit, selector variable is its literal
if len(cl) > 1:
self.topv += 1
selv = self.topv
cl.append(-self.topv)
self.oracle.add_clause(cl)
if selv not in self.wght:
# record selector and its weight
self.sels.append(selv)
self.wght[selv] = formula.wght[i]
self.smap[selv] = i
else:
# selector is not new; increment its weight
self.wght[selv] += formula.wght[i]
# storing the set of selectors
self.sels_set = set(self.sels)
# at this point internal and external variables are the same
for v in range(1, formula.nv + 1):
self.vmap.e2i[v] = v
self.vmap.i2e[v] = v
if self.verbose > 1:
print('c formula: {0} vars, {1} hard, {2} soft'.format(formula.nv,
len(formula.hard), len(formula.soft))) | Initialize the internal SAT oracle. The oracle is used
incrementally and so it is initialized only once when
constructing an object of class :class:`RC2`. Given an
input :class:`.WCNF` formula, the method bootstraps the
oracle with its hard clauses. It also augments the soft
clauses with "fresh" selectors and adds them to the oracle
afterwards.
Optional input parameter ``incr`` (``False`` by default)
regulates whether or not Glucose's incremental mode [6]_
is turned on.
:param formula: input formula
:param incr: apply incremental mode of Glucose
:type formula: :class:`.WCNF`
:type incr: bool | Below is the the instruction that describes the task:
### Input:
Initialize the internal SAT oracle. The oracle is used
incrementally and so it is initialized only once when
constructing an object of class :class:`RC2`. Given an
input :class:`.WCNF` formula, the method bootstraps the
oracle with its hard clauses. It also augments the soft
clauses with "fresh" selectors and adds them to the oracle
afterwards.
Optional input parameter ``incr`` (``False`` by default)
regulates whether or not Glucose's incremental mode [6]_
is turned on.
:param formula: input formula
:param incr: apply incremental mode of Glucose
:type formula: :class:`.WCNF`
:type incr: bool
### Response:
def init(self, formula, incr=False):
"""
Initialize the internal SAT oracle. The oracle is used
incrementally and so it is initialized only once when
constructing an object of class :class:`RC2`. Given an
input :class:`.WCNF` formula, the method bootstraps the
oracle with its hard clauses. It also augments the soft
clauses with "fresh" selectors and adds them to the oracle
afterwards.
Optional input parameter ``incr`` (``False`` by default)
regulates whether or not Glucose's incremental mode [6]_
is turned on.
:param formula: input formula
:param incr: apply incremental mode of Glucose
:type formula: :class:`.WCNF`
:type incr: bool
"""
# creating a solver object
self.oracle = Solver(name=self.solver, bootstrap_with=formula.hard,
incr=incr, use_timer=True)
# adding soft clauses to oracle
for i, cl in enumerate(formula.soft):
selv = cl[0] # if clause is unit, selector variable is its literal
if len(cl) > 1:
self.topv += 1
selv = self.topv
cl.append(-self.topv)
self.oracle.add_clause(cl)
if selv not in self.wght:
# record selector and its weight
self.sels.append(selv)
self.wght[selv] = formula.wght[i]
self.smap[selv] = i
else:
# selector is not new; increment its weight
self.wght[selv] += formula.wght[i]
# storing the set of selectors
self.sels_set = set(self.sels)
# at this point internal and external variables are the same
for v in range(1, formula.nv + 1):
self.vmap.e2i[v] = v
self.vmap.i2e[v] = v
if self.verbose > 1:
print('c formula: {0} vars, {1} hard, {2} soft'.format(formula.nv,
len(formula.hard), len(formula.soft))) |
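For orientation, this is how the surrounding RC2 solver is usually driven (a sketch assuming the standard python-sat package layout; init() above runs as part of construction).
from pysat.examples.rc2 import RC2
from pysat.formula import WCNF

wcnf = WCNF()
wcnf.append([-1, 2])         # hard clause: x1 -> x2
wcnf.append([1], weight=1)   # soft clause
wcnf.append([-2], weight=1)  # soft clause
with RC2(wcnf) as rc2:
    model = rc2.compute()    # one optimal model; cost is 1 here
    print(model, rc2.cost)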
def get_list(file,fmt):
'''makes a list out of the fmt from the LspOutput f using the format
i for int
f for float
d for double
s for string'''
out=[]
for i in fmt:
if i == 'i':
out.append(get_int(file));
elif i == 'f' or i == 'd':
out.append(get_float(file));
elif i == 's':
out.append(get_str(file));
else:
raise ValueError("Unexpected flag '{}'".format(i));
return out; | makes a list out of the fmt from the LspOutput f using the format
i for int
f for float
d for double
s for string | Below is the the instruction that describes the task:
### Input:
makes a list out of the fmt from the LspOutput f using the format
i for int
f for float
d for double
s for string
### Response:
def get_list(file,fmt):
'''makes a list out of the fmt from the LspOutput f using the format
i for int
f for float
d for double
s for string'''
out=[]
for i in fmt:
if i == 'i':
out.append(get_int(file));
elif i == 'f' or i == 'd':
out.append(get_float(file));
elif i == 's':
out.append(get_str(file));
else:
raise ValueError("Unexpected flag '{}'".format(i));
return out; |
def import_class(classpath):
"""Import the class referred to by the fully qualified class path.
Args:
classpath: A full "foo.bar.MyClass" path to a class definition.
Returns:
The class referred to by the classpath.
Raises:
ImportError: If an error occurs while importing the module.
        AttributeError: If the class does not exist in the imported module.
"""
modname, classname = classpath.rsplit(".", 1)
module = importlib.import_module(modname)
klass = getattr(module, classname)
return klass | Import the class referred to by the fully qualified class path.
Args:
classpath: A full "foo.bar.MyClass" path to a class definition.
Returns:
The class referred to by the classpath.
Raises:
ImportError: If an error occurs while importing the module.
        AttributeError: If the class does not exist in the imported module. | Below is the the instruction that describes the task:
### Input:
Import the class referred to by the fully qualified class path.
Args:
classpath: A full "foo.bar.MyClass" path to a class definition.
Returns:
The class referred to by the classpath.
Raises:
ImportError: If an error occurs while importing the module.
        AttributeError: If the class does not exist in the imported module.
### Response:
def import_class(classpath):
"""Import the class referred to by the fully qualified class path.
Args:
classpath: A full "foo.bar.MyClass" path to a class definition.
Returns:
The class referred to by the classpath.
Raises:
ImportError: If an error occurs while importing the module.
        AttributeError: If the class does not exist in the imported module.
"""
modname, classname = classpath.rsplit(".", 1)
module = importlib.import_module(modname)
klass = getattr(module, classname)
return klass |
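A quick standard-library illustration of the helper:
OrderedDict = import_class("collections.OrderedDict")
d = OrderedDict([("a", 1)])
assert list(d) == ["a"]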
def snr_ratio(in1, in2):
"""
The following function simply calculates the signal to noise ratio between two signals.
INPUTS:
in1 (no default): Array containing values for signal 1.
in2 (no default): Array containing values for signal 2.
OUTPUTS:
out1 The ratio of the signal to noise ratios of two signals.
"""
out1 = 20*(np.log10(np.linalg.norm(in1)/np.linalg.norm(in1-in2)))
return out1 | The following function simply calculates the signal to noise ratio between two signals.
INPUTS:
in1 (no default): Array containing values for signal 1.
in2 (no default): Array containing values for signal 2.
OUTPUTS:
out1 The ratio of the signal to noise ratios of two signals. | Below is the the instruction that describes the task:
### Input:
The following function simply calculates the signal to noise ratio between two signals.
INPUTS:
in1 (no default): Array containing values for signal 1.
in2 (no default): Array containing values for signal 2.
OUTPUTS:
out1 The ratio of the signal to noise ratios of two signals.
### Response:
def snr_ratio(in1, in2):
"""
The following function simply calculates the signal to noise ratio between two signals.
INPUTS:
in1 (no default): Array containing values for signal 1.
in2 (no default): Array containing values for signal 2.
OUTPUTS:
out1 The ratio of the signal to noise ratios of two signals.
"""
out1 = 20*(np.log10(np.linalg.norm(in1)/np.linalg.norm(in1-in2)))
return out1 |
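A short worked example (values chosen for illustration; identical inputs would give a zero denominator, so compare against a slightly perturbed copy).
import numpy as np

a = np.array([1.0, 2.0, 3.0])
b = a.copy()
b[1] += 0.01                 # small perturbation
print(snr_ratio(a, b))       # 20*log10(||a|| / ||a - b||), roughly 51.5 dB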
def _connect(host=DEFAULT_HOST, port=DEFAULT_PORT):
'''
    Returns a memcache client for the given host and port.
'''
if six.text_type(port).isdigit():
return memcache.Client(['{0}:{1}'.format(host, port)], debug=0)
    raise SaltInvocationError('port must be an integer') | Returns a memcache client for the given host and port. | Below is the the instruction that describes the task:
### Input:
    Returns a memcache client for the given host and port.
### Response:
def _connect(host=DEFAULT_HOST, port=DEFAULT_PORT):
'''
    Returns a memcache client for the given host and port.
'''
if six.text_type(port).isdigit():
return memcache.Client(['{0}:{1}'.format(host, port)], debug=0)
raise SaltInvocationError('port must be an integer') |
def schema_columns(self):
"""Return column informatino only from this schema"""
t = self.schema_term
columns = []
if t:
for i, c in enumerate(t.children):
if c.term_is("Table.Column"):
p = c.all_props
p['pos'] = i
p['name'] = c.value
p['header'] = self._name_for_col_term(c, i)
columns.append(p)
        return columns | Return column information only from this schema | Below is the the instruction that describes the task:
### Input:
Return column information only from this schema
### Response:
def schema_columns(self):
"""Return column informatino only from this schema"""
t = self.schema_term
columns = []
if t:
for i, c in enumerate(t.children):
if c.term_is("Table.Column"):
p = c.all_props
p['pos'] = i
p['name'] = c.value
p['header'] = self._name_for_col_term(c, i)
columns.append(p)
return columns |
def _prepare(constituents, t0, t = None, radians = True):
"""
Return constituent speed and equilibrium argument at a given time, and constituent node factors at given times.
Arguments:
constituents -- list of constituents to prepare
t0 -- time at which to evaluate speed and equilibrium argument for each constituent
t -- list of times at which to evaluate node factors for each constituent (default: t0)
radians -- whether to return the angular arguments in radians or degrees (default: True)
"""
#The equilibrium argument is constant and taken at the beginning of the
#time series (t0). The speed of the equilibrium argument changes very
#slowly, so again we take it to be constant over any length of data. The
#node factors change more rapidly.
if isinstance(t0, Iterable):
t0 = t0[0]
if t is None:
t = [t0]
if not isinstance(t, Iterable):
t = [t]
a0 = astro(t0)
a = [astro(t_i) for t_i in t]
#For convenience give u, V0 (but not speed!) in [0, 360)
V0 = np.array([c.V(a0) for c in constituents])[:, np.newaxis]
speed = np.array([c.speed(a0) for c in constituents])[:, np.newaxis]
u = [np.mod(np.array([c.u(a_i) for c in constituents])[:, np.newaxis], 360.0)
for a_i in a]
f = [np.mod(np.array([c.f(a_i) for c in constituents])[:, np.newaxis], 360.0)
for a_i in a]
if radians:
speed = d2r*speed
V0 = d2r*V0
u = [d2r*each for each in u]
return speed, u, f, V0 | Return constituent speed and equilibrium argument at a given time, and constituent node factors at given times.
Arguments:
constituents -- list of constituents to prepare
t0 -- time at which to evaluate speed and equilibrium argument for each constituent
t -- list of times at which to evaluate node factors for each constituent (default: t0)
radians -- whether to return the angular arguments in radians or degrees (default: True) | Below is the the instruction that describes the task:
### Input:
Return constituent speed and equilibrium argument at a given time, and constituent node factors at given times.
Arguments:
constituents -- list of constituents to prepare
t0 -- time at which to evaluate speed and equilibrium argument for each constituent
t -- list of times at which to evaluate node factors for each constituent (default: t0)
radians -- whether to return the angular arguments in radians or degrees (default: True)
### Response:
def _prepare(constituents, t0, t = None, radians = True):
"""
Return constituent speed and equilibrium argument at a given time, and constituent node factors at given times.
Arguments:
constituents -- list of constituents to prepare
t0 -- time at which to evaluate speed and equilibrium argument for each constituent
t -- list of times at which to evaluate node factors for each constituent (default: t0)
radians -- whether to return the angular arguments in radians or degrees (default: True)
"""
#The equilibrium argument is constant and taken at the beginning of the
#time series (t0). The speed of the equilibrium argument changes very
#slowly, so again we take it to be constant over any length of data. The
#node factors change more rapidly.
if isinstance(t0, Iterable):
t0 = t0[0]
if t is None:
t = [t0]
if not isinstance(t, Iterable):
t = [t]
a0 = astro(t0)
a = [astro(t_i) for t_i in t]
#For convenience give u, V0 (but not speed!) in [0, 360)
V0 = np.array([c.V(a0) for c in constituents])[:, np.newaxis]
speed = np.array([c.speed(a0) for c in constituents])[:, np.newaxis]
u = [np.mod(np.array([c.u(a_i) for c in constituents])[:, np.newaxis], 360.0)
for a_i in a]
f = [np.mod(np.array([c.f(a_i) for c in constituents])[:, np.newaxis], 360.0)
for a_i in a]
if radians:
speed = d2r*speed
V0 = d2r*V0
u = [d2r*each for each in u]
return speed, u, f, V0 |
def prepare(self, hash, start, end, name, sources, sample=None):
""" Prepare a historics query which can later be started.
Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/historicsprepare
:param hash: The hash of a CSDL create the query for
:type hash: str
:param start: when to start querying data from - unix timestamp
:type start: int
:param end: when the query should end - unix timestamp
:type end: int
:param name: the name of the query
:type name: str
:param sources: list of sources e.g. ['facebook','bitly','tumblr']
:type sources: list
:param sample: percentage to sample, either 10 or 100
:type sample: int
:return: dict of REST API output with headers attached
:rtype: :class:`~datasift.request.DictResponse`
:raises: :class:`~datasift.exceptions.HistoricSourcesRequired`, :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
"""
if len(sources) == 0:
raise HistoricSourcesRequired()
if not isinstance(sources, list):
sources = [sources]
params = {'hash': hash, 'start': start, 'end': end, 'name': name, 'sources': ','.join(sources)}
if sample:
params['sample'] = sample
return self.request.post('prepare', params) | Prepare a historics query which can later be started.
Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/historicsprepare
:param hash: The hash of a CSDL create the query for
:type hash: str
:param start: when to start querying data from - unix timestamp
:type start: int
:param end: when the query should end - unix timestamp
:type end: int
:param name: the name of the query
:type name: str
:param sources: list of sources e.g. ['facebook','bitly','tumblr']
:type sources: list
:param sample: percentage to sample, either 10 or 100
:type sample: int
:return: dict of REST API output with headers attached
:rtype: :class:`~datasift.request.DictResponse`
:raises: :class:`~datasift.exceptions.HistoricSourcesRequired`, :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError` | Below is the the instruction that describes the task:
### Input:
Prepare a historics query which can later be started.
Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/historicsprepare
:param hash: The hash of a CSDL create the query for
:type hash: str
:param start: when to start querying data from - unix timestamp
:type start: int
:param end: when the query should end - unix timestamp
:type end: int
:param name: the name of the query
:type name: str
:param sources: list of sources e.g. ['facebook','bitly','tumblr']
:type sources: list
:param sample: percentage to sample, either 10 or 100
:type sample: int
:return: dict of REST API output with headers attached
:rtype: :class:`~datasift.request.DictResponse`
:raises: :class:`~datasift.exceptions.HistoricSourcesRequired`, :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
### Response:
def prepare(self, hash, start, end, name, sources, sample=None):
""" Prepare a historics query which can later be started.
Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/historicsprepare
:param hash: The hash of a CSDL create the query for
:type hash: str
:param start: when to start querying data from - unix timestamp
:type start: int
:param end: when the query should end - unix timestamp
:type end: int
:param name: the name of the query
:type name: str
:param sources: list of sources e.g. ['facebook','bitly','tumblr']
:type sources: list
:param sample: percentage to sample, either 10 or 100
:type sample: int
:return: dict of REST API output with headers attached
:rtype: :class:`~datasift.request.DictResponse`
:raises: :class:`~datasift.exceptions.HistoricSourcesRequired`, :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
"""
if len(sources) == 0:
raise HistoricSourcesRequired()
if not isinstance(sources, list):
sources = [sources]
params = {'hash': hash, 'start': start, 'end': end, 'name': name, 'sources': ','.join(sources)}
if sample:
params['sample'] = sample
return self.request.post('prepare', params) |
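A hedged usage sketch: the client object, CSDL hash, response field, and the start() call below are assumptions for illustration, not values or endpoints confirmed by the source.
prepared = client.historics.prepare(
    "2459b03a13577579bca76471778a5c3d",   # hypothetical CSDL hash
    start=1500249600, end=1500336000,     # one day, as unix timestamps
    name="example backfill",
    sources=["tumblr"],
    sample=10)
historics_id = prepared["id"]             # assumed response field
client.historics.start(historics_id)      # assumed companion endpoint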
def clear(self):
"Remove all rows and reset internal structures"
## list has no clear ... remove items in reverse order
for i in range(len(self)-1, -1, -1):
del self[i]
self._key = 0
if hasattr(self._grid_view, "wx_obj"):
self._grid_view.wx_obj.ClearGrid() | Remove all rows and reset internal structures | Below is the the instruction that describes the task:
### Input:
Remove all rows and reset internal structures
### Response:
def clear(self):
"Remove all rows and reset internal structures"
## list has no clear ... remove items in reverse order
for i in range(len(self)-1, -1, -1):
del self[i]
self._key = 0
if hasattr(self._grid_view, "wx_obj"):
self._grid_view.wx_obj.ClearGrid() |
def mock_cmd(self, release, *cmd, **kwargs):
"""Run a mock command in the chroot for a given release"""
fmt = '{mock_cmd}'
if kwargs.get('new_chroot') is True:
fmt +=' --new-chroot'
fmt += ' --configdir={mock_dir}'
return self.call(fmt.format(**release).split()
+ list(cmd)) | Run a mock command in the chroot for a given release | Below is the the instruction that describes the task:
### Input:
Run a mock command in the chroot for a given release
### Response:
def mock_cmd(self, release, *cmd, **kwargs):
"""Run a mock command in the chroot for a given release"""
fmt = '{mock_cmd}'
if kwargs.get('new_chroot') is True:
fmt +=' --new-chroot'
fmt += ' --configdir={mock_dir}'
return self.call(fmt.format(**release).split()
+ list(cmd)) |
def from_stmt(stmt, engine, **kwargs):
"""
    Execute a query in the form of a text clause, return the result in the form of
:class:`PrettyTable`.
:type stmt: TextClause
:param stmt:
:type engine: Engine
:param engine:
:rtype: PrettyTable
"""
result_proxy = engine.execute(stmt, **kwargs)
    return from_db_cursor(result_proxy.cursor) | Execute a query in the form of a text clause, return the result in the form of
:class:`PrettyTable`.
:type stmt: TextClause
:param stmt:
:type engine: Engine
:param engine:
:rtype: PrettyTable | Below is the the instruction that describes the task:
### Input:
    Execute a query in the form of a text clause, return the result in the form of
:class:`PrettyTable`.
:type stmt: TextClause
:param stmt:
:type engine: Engine
:param engine:
:rtype: PrettyTable
### Response:
def from_stmt(stmt, engine, **kwargs):
"""
    Execute a query in the form of a text clause, return the result in the form of
:class:`PrettyTable`.
:type stmt: TextClause
:param stmt:
:type engine: Engine
:param engine:
:rtype: PrettyTable
"""
result_proxy = engine.execute(stmt, **kwargs)
return from_db_cursor(result_proxy.cursor) |
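A minimal sketch with an in-memory SQLite engine, using the SQLAlchemy 1.x-style engine.execute seen above (table and rows are made up).
from sqlalchemy import create_engine, text

engine = create_engine("sqlite://")
engine.execute(text("CREATE TABLE t (id INTEGER, name TEXT)"))
engine.execute(text("INSERT INTO t VALUES (1, 'a'), (2, 'b')"))
print(from_stmt(text("SELECT * FROM t"), engine))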
def get_user_lists(self, course, aggregationid=''):
""" Get the available student and tutor lists for aggregation edition"""
tutor_list = course.get_staff()
# Determine student list and if they are grouped
student_list = list(self.database.aggregations.aggregate([
{"$match": {"courseid": course.get_id()}},
{"$unwind": "$students"},
{"$project": {
"classroom": "$_id",
"students": 1,
"grouped": {
"$anyElementTrue": {
"$map": {
"input": "$groups.students",
"as": "group",
"in": {
"$anyElementTrue": {
"$map": {
"input": "$$group",
"as": "groupmember",
"in": {"$eq": ["$$groupmember", "$students"]}
}
}
}
}
}
}
}}
]))
student_list = dict([(student["students"], student) for student in student_list])
users_info = self.user_manager.get_users_info(list(student_list.keys()) + tutor_list)
if aggregationid:
# Order the non-registered students
other_students = [student_list[entry]['students'] for entry in student_list.keys() if
not student_list[entry]['classroom'] == ObjectId(aggregationid)]
other_students = sorted(other_students, key=lambda val: (("0"+users_info[val][0]) if users_info[val] else ("1"+val)))
return student_list, tutor_list, other_students, users_info
else:
return student_list, tutor_list, users_info | Get the available student and tutor lists for aggregation edition | Below is the the instruction that describes the task:
### Input:
Get the available student and tutor lists for aggregation edition
### Response:
def get_user_lists(self, course, aggregationid=''):
""" Get the available student and tutor lists for aggregation edition"""
tutor_list = course.get_staff()
# Determine student list and if they are grouped
student_list = list(self.database.aggregations.aggregate([
{"$match": {"courseid": course.get_id()}},
{"$unwind": "$students"},
{"$project": {
"classroom": "$_id",
"students": 1,
"grouped": {
"$anyElementTrue": {
"$map": {
"input": "$groups.students",
"as": "group",
"in": {
"$anyElementTrue": {
"$map": {
"input": "$$group",
"as": "groupmember",
"in": {"$eq": ["$$groupmember", "$students"]}
}
}
}
}
}
}
}}
]))
student_list = dict([(student["students"], student) for student in student_list])
users_info = self.user_manager.get_users_info(list(student_list.keys()) + tutor_list)
if aggregationid:
# Order the non-registered students
other_students = [student_list[entry]['students'] for entry in student_list.keys() if
not student_list[entry]['classroom'] == ObjectId(aggregationid)]
other_students = sorted(other_students, key=lambda val: (("0"+users_info[val][0]) if users_info[val] else ("1"+val)))
return student_list, tutor_list, other_students, users_info
else:
return student_list, tutor_list, users_info |
def getPageSizeByName(self, pageSizeName):
""" Returns a validated PageSize instance corresponding to the given
name. Returns None if the name is not a valid PageSize.
"""
pageSize = None
lowerCaseNames = {pageSize.lower(): pageSize for pageSize in
self.availablePageSizes()}
if pageSizeName.lower() in lowerCaseNames:
pageSize = getattr(QPagedPaintDevice, lowerCaseNames[pageSizeName.lower()])
return pageSize | Returns a validated PageSize instance corresponding to the given
name. Returns None if the name is not a valid PageSize. | Below is the the instruction that describes the task:
### Input:
Returns a validated PageSize instance corresponding to the given
name. Returns None if the name is not a valid PageSize.
### Response:
def getPageSizeByName(self, pageSizeName):
""" Returns a validated PageSize instance corresponding to the given
name. Returns None if the name is not a valid PageSize.
"""
pageSize = None
lowerCaseNames = {pageSize.lower(): pageSize for pageSize in
self.availablePageSizes()}
if pageSizeName.lower() in lowerCaseNames:
pageSize = getattr(QPagedPaintDevice, lowerCaseNames[pageSizeName.lower()])
return pageSize |
def create_ip_arp_request(srchw, srcip, targetip):
'''
Create and return a packet containing an Ethernet header
and ARP header.
'''
ether = Ethernet()
ether.src = srchw
ether.dst = SpecialEthAddr.ETHER_BROADCAST.value
ether.ethertype = EtherType.ARP
arp = Arp()
arp.operation = ArpOperation.Request
arp.senderhwaddr = srchw
arp.senderprotoaddr = srcip
arp.targethwaddr = SpecialEthAddr.ETHER_BROADCAST.value
arp.targetprotoaddr = targetip
return ether + arp | Create and return a packet containing an Ethernet header
and ARP header. | Below is the the instruction that describes the task:
### Input:
Create and return a packet containing an Ethernet header
and ARP header.
### Response:
def create_ip_arp_request(srchw, srcip, targetip):
'''
Create and return a packet containing an Ethernet header
and ARP header.
'''
ether = Ethernet()
ether.src = srchw
ether.dst = SpecialEthAddr.ETHER_BROADCAST.value
ether.ethertype = EtherType.ARP
arp = Arp()
arp.operation = ArpOperation.Request
arp.senderhwaddr = srchw
arp.senderprotoaddr = srcip
arp.targethwaddr = SpecialEthAddr.ETHER_BROADCAST.value
arp.targetprotoaddr = targetip
return ether + arp |
def download_data(identifier, outdir):
"""Download data from a separate data repository for testing.
Parameters
----------
identifier: string
The identifier used to find the data set
outdir: string
unzip the data in this directory
"""
# determine target
if use_local_data_repository is not None:
url_base = 'file:' + request.pathname2url(
use_local_data_repository + os.sep)
else:
url_base = repository_url
print('url_base: {}'.format(url_base))
url = url_base + inventory_filename
# download inventory file
filename, headers =request.urlretrieve(url)
df = pd.read_csv(
filename,
delim_whitespace=True,
comment='#',
header=None,
names=['identifier', 'rel_path'],
)
# find relative path to data file
rel_path_query = df.query('identifier == "{}"'.format(identifier))
if rel_path_query.shape[0] == 0:
raise Exception('identifier not found')
rel_path = rel_path_query['rel_path'].values[0]
# download the file
url = url_base + rel_path
print('data url: {}'.format(url))
filename, headers =request.urlretrieve(url)
if not os.path.isdir(outdir):
os.makedirs(outdir)
zip_obj = zipfile.ZipFile(filename)
zip_obj.extractall(outdir) | Download data from a separate data repository for testing.
Parameters
----------
identifier: string
The identifier used to find the data set
outdir: string
unzip the data in this directory | Below is the the instruction that describes the task:
### Input:
Download data from a separate data repository for testing.
Parameters
----------
identifier: string
The identifier used to find the data set
outdir: string
unzip the data in this directory
### Response:
def download_data(identifier, outdir):
"""Download data from a separate data repository for testing.
Parameters
----------
identifier: string
The identifier used to find the data set
outdir: string
unzip the data in this directory
"""
# determine target
if use_local_data_repository is not None:
url_base = 'file:' + request.pathname2url(
use_local_data_repository + os.sep)
else:
url_base = repository_url
print('url_base: {}'.format(url_base))
url = url_base + inventory_filename
# download inventory file
filename, headers =request.urlretrieve(url)
df = pd.read_csv(
filename,
delim_whitespace=True,
comment='#',
header=None,
names=['identifier', 'rel_path'],
)
# find relative path to data file
rel_path_query = df.query('identifier == "{}"'.format(identifier))
if rel_path_query.shape[0] == 0:
raise Exception('identifier not found')
rel_path = rel_path_query['rel_path'].values[0]
# download the file
url = url_base + rel_path
print('data url: {}'.format(url))
filename, headers =request.urlretrieve(url)
if not os.path.isdir(outdir):
os.makedirs(outdir)
zip_obj = zipfile.ZipFile(filename)
zip_obj.extractall(outdir) |
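Illustrative call (the identifier is a placeholder and must exist in the remote inventory file for the download to succeed).
download_data('example_dataset_01', outdir='data/example_dataset_01')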
def paint(self):
"""
Renders a javascript snippet suitable for use as a mapbox-gl fill-extrusion paint entry
Returns:
A dict that can be converted to a mapbox-gl javascript paint snippet
"""
snippet = {
'fill-extrusion-opacity': VectorStyle.get_style_value(self.opacity),
'fill-extrusion-color': VectorStyle.get_style_value(self.color),
'fill-extrusion-base': VectorStyle.get_style_value(self.base),
'fill-extrusion-height': VectorStyle.get_style_value(self.height)
}
if self.translate:
snippet['fill-extrusion-translate'] = self.translate
return snippet | Renders a javascript snippet suitable for use as a mapbox-gl fill-extrusion paint entry
Returns:
A dict that can be converted to a mapbox-gl javascript paint snippet | Below is the the instruction that describes the task:
### Input:
Renders a javascript snippet suitable for use as a mapbox-gl fill-extrusion paint entry
Returns:
A dict that can be converted to a mapbox-gl javascript paint snippet
### Response:
def paint(self):
"""
Renders a javascript snippet suitable for use as a mapbox-gl fill-extrusion paint entry
Returns:
A dict that can be converted to a mapbox-gl javascript paint snippet
"""
snippet = {
'fill-extrusion-opacity': VectorStyle.get_style_value(self.opacity),
'fill-extrusion-color': VectorStyle.get_style_value(self.color),
'fill-extrusion-base': VectorStyle.get_style_value(self.base),
'fill-extrusion-height': VectorStyle.get_style_value(self.height)
}
if self.translate:
snippet['fill-extrusion-translate'] = self.translate
return snippet |
def set_attributes(obj, additional_data):
"""
Given an object and a dictionary, give the object new attributes from that dictionary.
    Uses _strip_column_name to get rid of whitespace/uppercase/special characters.
"""
for key, value in additional_data.items():
if hasattr(obj, key):
raise ValueError("Key %s in additional_data already exists in this object" % key)
setattr(obj, _strip_column_name(key), value) | Given an object and a dictionary, give the object new attributes from that dictionary.
    Uses _strip_column_name to get rid of whitespace/uppercase/special characters. | Below is the the instruction that describes the task:
### Input:
Given an object and a dictionary, give the object new attributes from that dictionary.
    Uses _strip_column_name to get rid of whitespace/uppercase/special characters.
### Response:
def set_attributes(obj, additional_data):
"""
Given an object and a dictionary, give the object new attributes from that dictionary.
    Uses _strip_column_name to get rid of whitespace/uppercase/special characters.
"""
for key, value in additional_data.items():
if hasattr(obj, key):
raise ValueError("Key %s in additional_data already exists in this object" % key)
setattr(obj, _strip_column_name(key), value) |
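A tiny illustration with a hypothetical bare object; the exact attribute name depends on what _strip_column_name produces (assumed here to be a lower-cased, underscored form).
class Row(object):
    pass

r = Row()
set_attributes(r, {"Sample Count": 3})
print(r.sample_count)  # assuming _strip_column_name('Sample Count') -> 'sample_count'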
def xlim(min, max):
"""
This function will set the x axis range for all time series plots
Parameters:
min : flt
The time to start all time series plots. Can be given in seconds since epoch, or as a string
in the format "YYYY-MM-DD HH:MM:SS"
max : flt
The time to end all time series plots. Can be given in seconds since epoch, or as a string
in the format "YYYY-MM-DD HH:MM:SS"
Returns:
None
Examples:
>>> # Set the timespan to be 2017-07-17 00:00:00 plus 1 day
>>> import pytplot
>>> pytplot.xlim(1500249600, 1500249600 + 86400)
>>> # The same as above, but using different inputs
>>> pytplot.xlim("2017-07-17 00:00:00", "2017-07-18 00:00:00")
"""
if not isinstance(min, (int, float, complex)):
min = tplot_utilities.str_to_int(min)
if not isinstance(max, (int, float, complex)):
max = tplot_utilities.str_to_int(max)
if 'x_range' in tplot_opt_glob:
lim_info['xlast'] = tplot_opt_glob['x_range']
else:
lim_info['xfull'] = Range1d(min, max)
lim_info['xlast'] = Range1d(min, max)
tplot_opt_glob['x_range'] = [min, max]
return | This function will set the x axis range for all time series plots
Parameters:
min : flt
The time to start all time series plots. Can be given in seconds since epoch, or as a string
in the format "YYYY-MM-DD HH:MM:SS"
max : flt
The time to end all time series plots. Can be given in seconds since epoch, or as a string
in the format "YYYY-MM-DD HH:MM:SS"
Returns:
None
Examples:
>>> # Set the timespan to be 2017-07-17 00:00:00 plus 1 day
>>> import pytplot
>>> pytplot.xlim(1500249600, 1500249600 + 86400)
>>> # The same as above, but using different inputs
>>> pytplot.xlim("2017-07-17 00:00:00", "2017-07-18 00:00:00") | Below is the the instruction that describes the task:
### Input:
This function will set the x axis range for all time series plots
Parameters:
min : flt
The time to start all time series plots. Can be given in seconds since epoch, or as a string
in the format "YYYY-MM-DD HH:MM:SS"
max : flt
The time to end all time series plots. Can be given in seconds since epoch, or as a string
in the format "YYYY-MM-DD HH:MM:SS"
Returns:
None
Examples:
>>> # Set the timespan to be 2017-07-17 00:00:00 plus 1 day
>>> import pytplot
>>> pytplot.xlim(1500249600, 1500249600 + 86400)
>>> # The same as above, but using different inputs
>>> pytplot.xlim("2017-07-17 00:00:00", "2017-07-18 00:00:00")
### Response:
def xlim(min, max):
"""
This function will set the x axis range for all time series plots
Parameters:
min : flt
The time to start all time series plots. Can be given in seconds since epoch, or as a string
in the format "YYYY-MM-DD HH:MM:SS"
max : flt
The time to end all time series plots. Can be given in seconds since epoch, or as a string
in the format "YYYY-MM-DD HH:MM:SS"
Returns:
None
Examples:
>>> # Set the timespan to be 2017-07-17 00:00:00 plus 1 day
>>> import pytplot
>>> pytplot.xlim(1500249600, 1500249600 + 86400)
>>> # The same as above, but using different inputs
>>> pytplot.xlim("2017-07-17 00:00:00", "2017-07-18 00:00:00")
"""
if not isinstance(min, (int, float, complex)):
min = tplot_utilities.str_to_int(min)
if not isinstance(max, (int, float, complex)):
max = tplot_utilities.str_to_int(max)
if 'x_range' in tplot_opt_glob:
lim_info['xlast'] = tplot_opt_glob['x_range']
else:
lim_info['xfull'] = Range1d(min, max)
lim_info['xlast'] = Range1d(min, max)
tplot_opt_glob['x_range'] = [min, max]
return |
def _get_log_lines(self, n=300):
"""Returns a list with the last ``n`` lines of the nextflow log file
Parameters
----------
n : int
Number of last lines from the log file
Returns
-------
list
List of strings with the nextflow log
"""
with open(self.log_file) as fh:
last_lines = fh.readlines()[-n:]
return last_lines | Returns a list with the last ``n`` lines of the nextflow log file
Parameters
----------
n : int
Number of last lines from the log file
Returns
-------
list
List of strings with the nextflow log | Below is the the instruction that describes the task:
### Input:
Returns a list with the last ``n`` lines of the nextflow log file
Parameters
----------
n : int
Number of last lines from the log file
Returns
-------
list
List of strings with the nextflow log
### Response:
def _get_log_lines(self, n=300):
"""Returns a list with the last ``n`` lines of the nextflow log file
Parameters
----------
n : int
Number of last lines from the log file
Returns
-------
list
List of strings with the nextflow log
"""
with open(self.log_file) as fh:
last_lines = fh.readlines()[-n:]
return last_lines |
def _check_choices_attribute(self): # pragma: no cover
"""Checks to make sure that choices contains valid timezone choices."""
if self.choices:
warning_params = {
'msg': (
"'choices' contains an invalid time zone value '{value}' "
"which was not found as a supported time zone by pytz "
"{version}."
),
'hint': "Values must be found in pytz.all_timezones.",
'obj': self,
}
for option_key, option_value in self.choices:
if isinstance(option_value, (list, tuple)):
# This is an optgroup, so look inside the group for
# options.
for optgroup_key in map(lambda x: x[0], option_value):
if optgroup_key not in pytz.all_timezones:
# Make sure we don't raise this error on empty
# values
if optgroup_key not in self.empty_values:
# Update the error message by adding the value
warning_params.update({
'msg': warning_params['msg'].format(
value=optgroup_key,
version=pytz.VERSION
)
})
# Return the warning
return [
checks.Warning(**warning_params)
]
elif option_key not in pytz.all_timezones:
# Make sure we don't raise this error on empty
# values
if option_key not in self.empty_values:
# Update the error message by adding the value
warning_params.update({
'msg': warning_params['msg'].format(
value=option_key,
version=pytz.VERSION
)
})
# Return the warning
return [
checks.Warning(**warning_params)
]
# When no error, return an empty list
return [] | Checks to make sure that choices contains valid timezone choices. | Below is the the instruction that describes the task:
### Input:
Checks to make sure that choices contains valid timezone choices.
### Response:
def _check_choices_attribute(self): # pragma: no cover
"""Checks to make sure that choices contains valid timezone choices."""
if self.choices:
warning_params = {
'msg': (
"'choices' contains an invalid time zone value '{value}' "
"which was not found as a supported time zone by pytz "
"{version}."
),
'hint': "Values must be found in pytz.all_timezones.",
'obj': self,
}
for option_key, option_value in self.choices:
if isinstance(option_value, (list, tuple)):
# This is an optgroup, so look inside the group for
# options.
for optgroup_key in map(lambda x: x[0], option_value):
if optgroup_key not in pytz.all_timezones:
# Make sure we don't raise this error on empty
# values
if optgroup_key not in self.empty_values:
# Update the error message by adding the value
warning_params.update({
'msg': warning_params['msg'].format(
value=optgroup_key,
version=pytz.VERSION
)
})
# Return the warning
return [
checks.Warning(**warning_params)
]
elif option_key not in pytz.all_timezones:
# Make sure we don't raise this error on empty
# values
if option_key not in self.empty_values:
# Update the error message by adding the value
warning_params.update({
'msg': warning_params['msg'].format(
value=option_key,
version=pytz.VERSION
)
})
# Return the warning
return [
checks.Warning(**warning_params)
]
# When no error, return an empty list
return [] |
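The same membership check can be exercised outside Django; below is a minimal sketch, assuming pytz is installed, that walks an invented choices list (one flat option, one optgroup, one deliberately bad value) and reports values missing from pytz.all_timezones.

import pytz

choices = [
    ('UTC', 'UTC'),
    ('Europe', [('Europe/Paris', 'Paris'), ('Mars/Olympus_Mons', 'Olympus Mons')]),
]

def invalid_timezone_values(choices):
    bad = []
    for key, value in choices:
        # Optgroups nest their options as a list/tuple of (key, label) pairs.
        options = [k for k, _ in value] if isinstance(value, (list, tuple)) else [key]
        bad.extend(k for k in options if k and k not in pytz.all_timezones)
    return bad

print(invalid_timezone_values(choices))   # ['Mars/Olympus_Mons']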
def items_sharing_ngrams(self, query):
"""Retrieve the subset of items that share n-grams the query string.
:param query: look up items that share N-grams with this string.
:return: mapping from matched string to the number of shared N-grams.
>>> from ngram import NGram
>>> n = NGram(["ham","spam","eggs"])
>>> sorted(n.items_sharing_ngrams("mam").items())
[('ham', 2), ('spam', 2)]
"""
# From matched string to number of N-grams shared with query string
shared = {}
# Dictionary mapping n-gram to string to number of occurrences of that
# ngram in the string that remain to be matched.
remaining = {}
for ngram in self.split(query):
try:
for match, count in self._grams[ngram].items():
remaining.setdefault(ngram, {}).setdefault(match, count)
# match as many occurrences as exist in matched string
if remaining[ngram][match] > 0:
remaining[ngram][match] -= 1
shared.setdefault(match, 0)
shared[match] += 1
except KeyError:
pass
        return shared | Retrieve the subset of items that share n-grams with the query string.
:param query: look up items that share N-grams with this string.
:return: mapping from matched string to the number of shared N-grams.
>>> from ngram import NGram
>>> n = NGram(["ham","spam","eggs"])
>>> sorted(n.items_sharing_ngrams("mam").items())
[('ham', 2), ('spam', 2)] | Below is the the instruction that describes the task:
### Input:
Retrieve the subset of items that share n-grams with the query string.
:param query: look up items that share N-grams with this string.
:return: mapping from matched string to the number of shared N-grams.
>>> from ngram import NGram
>>> n = NGram(["ham","spam","eggs"])
>>> sorted(n.items_sharing_ngrams("mam").items())
[('ham', 2), ('spam', 2)]
### Response:
def items_sharing_ngrams(self, query):
"""Retrieve the subset of items that share n-grams the query string.
:param query: look up items that share N-grams with this string.
:return: mapping from matched string to the number of shared N-grams.
>>> from ngram import NGram
>>> n = NGram(["ham","spam","eggs"])
>>> sorted(n.items_sharing_ngrams("mam").items())
[('ham', 2), ('spam', 2)]
"""
# From matched string to number of N-grams shared with query string
shared = {}
# Dictionary mapping n-gram to string to number of occurrences of that
# ngram in the string that remain to be matched.
remaining = {}
for ngram in self.split(query):
try:
for match, count in self._grams[ngram].items():
remaining.setdefault(ngram, {}).setdefault(match, count)
# match as many occurrences as exist in matched string
if remaining[ngram][match] > 0:
remaining[ngram][match] -= 1
shared.setdefault(match, 0)
shared[match] += 1
except KeyError:
pass
return shared |
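The counting idea can also be reproduced without the ngram package; this is a simplified sketch (not the package's exact algorithm) that pads each string, counts character trigrams, and sums the per-trigram minimum counts shared with the query. It happens to give the same numbers as the doctest above.

from collections import Counter

def trigrams(s, n=3, pad='$'):
    # Pad both ends so the first and last characters still appear in full n-grams.
    s = pad * (n - 1) + s + pad * (n - 1)
    return Counter(s[i:i + n] for i in range(len(s) - n + 1))

def shared_ngrams(query, items):
    q = trigrams(query)
    # Counter & Counter keeps the minimum count per n-gram, i.e. matched occurrences.
    return {item: sum((q & trigrams(item)).values()) for item in items}

result = shared_ngrams('mam', ['ham', 'spam', 'eggs'])
print({k: v for k, v in result.items() if v})   # {'ham': 2, 'spam': 2}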
def create_dialog(self):
"""Create the dialog."""
bbox = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
self.idx_ok = bbox.button(QDialogButtonBox.Ok)
self.idx_cancel = bbox.button(QDialogButtonBox.Cancel)
filebutton = QPushButton()
filebutton.setText('Choose')
self.idx_filename = filebutton
self.xp_format = FormMenu(['CSV', 'Brain Vision'])
self.all_types = FormBool('All event types')
self.idx_evt_type = QListWidget()
self.idx_evt_type.setSelectionMode(QAbstractItemView.ExtendedSelection)
filebutton.clicked.connect(self.save_as)
self.all_types.connect(self.toggle_buttons)
bbox.clicked.connect(self.button_clicked)
form = QFormLayout()
form.addRow('Filename', self.idx_filename)
form.addRow('Format', self.xp_format)
form.addRow(self.all_types)
form.addRow('Event type(s)', self.idx_evt_type)
btnlayout = QHBoxLayout()
btnlayout.addStretch(1)
btnlayout.addWidget(bbox)
vlayout = QVBoxLayout()
vlayout.addLayout(form)
vlayout.addStretch(1)
vlayout.addLayout(btnlayout)
self.setLayout(vlayout) | Create the dialog. | Below is the the instruction that describes the task:
### Input:
Create the dialog.
### Response:
def create_dialog(self):
"""Create the dialog."""
bbox = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
self.idx_ok = bbox.button(QDialogButtonBox.Ok)
self.idx_cancel = bbox.button(QDialogButtonBox.Cancel)
filebutton = QPushButton()
filebutton.setText('Choose')
self.idx_filename = filebutton
self.xp_format = FormMenu(['CSV', 'Brain Vision'])
self.all_types = FormBool('All event types')
self.idx_evt_type = QListWidget()
self.idx_evt_type.setSelectionMode(QAbstractItemView.ExtendedSelection)
filebutton.clicked.connect(self.save_as)
self.all_types.connect(self.toggle_buttons)
bbox.clicked.connect(self.button_clicked)
form = QFormLayout()
form.addRow('Filename', self.idx_filename)
form.addRow('Format', self.xp_format)
form.addRow(self.all_types)
form.addRow('Event type(s)', self.idx_evt_type)
btnlayout = QHBoxLayout()
btnlayout.addStretch(1)
btnlayout.addWidget(bbox)
vlayout = QVBoxLayout()
vlayout.addLayout(form)
vlayout.addStretch(1)
vlayout.addLayout(btnlayout)
self.setLayout(vlayout) |
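The method above leans on project-specific widgets (FormMenu, FormBool) and the enclosing dialog class, so it will not run in isolation. Below is only a stripped-down sketch of the same form-plus-button-box layout pattern with stock widgets, assuming PyQt5 is installed; the dialog contents are illustrative.

import sys
from PyQt5.QtWidgets import (QApplication, QComboBox, QDialog, QDialogButtonBox,
                             QFormLayout, QHBoxLayout, QPushButton, QVBoxLayout)

class ExportDialog(QDialog):
    def __init__(self):
        super().__init__()
        bbox = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
        bbox.accepted.connect(self.accept)      # Ok closes the dialog with accept()
        bbox.rejected.connect(self.reject)      # Cancel closes it with reject()
        form = QFormLayout()
        form.addRow('Filename', QPushButton('Choose'))
        form.addRow('Format', QComboBox())
        btnlayout = QHBoxLayout()
        btnlayout.addStretch(1)
        btnlayout.addWidget(bbox)
        vlayout = QVBoxLayout()
        vlayout.addLayout(form)
        vlayout.addLayout(btnlayout)
        self.setLayout(vlayout)

if __name__ == '__main__':
    app = QApplication(sys.argv)
    ExportDialog().exec_()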
def centroid_2dg(data, error=None, mask=None):
"""
Calculate the centroid of a 2D array by fitting a 2D Gaussian (plus
a constant) to the array.
Invalid values (e.g. NaNs or infs) in the ``data`` or ``error``
arrays are automatically masked. The mask for invalid values
represents the combination of the invalid-value masks for the
``data`` and ``error`` arrays.
Parameters
----------
data : array_like
The 2D data array.
error : array_like, optional
The 2D array of the 1-sigma errors of the input ``data``.
mask : array_like (bool), optional
A boolean mask, with the same shape as ``data``, where a `True`
value indicates the corresponding element of ``data`` is masked.
Returns
-------
centroid : `~numpy.ndarray`
The ``x, y`` coordinates of the centroid.
"""
gfit = fit_2dgaussian(data, error=error, mask=mask)
return np.array([gfit.x_mean.value, gfit.y_mean.value]) | Calculate the centroid of a 2D array by fitting a 2D Gaussian (plus
a constant) to the array.
Invalid values (e.g. NaNs or infs) in the ``data`` or ``error``
arrays are automatically masked. The mask for invalid values
represents the combination of the invalid-value masks for the
``data`` and ``error`` arrays.
Parameters
----------
data : array_like
The 2D data array.
error : array_like, optional
The 2D array of the 1-sigma errors of the input ``data``.
mask : array_like (bool), optional
A boolean mask, with the same shape as ``data``, where a `True`
value indicates the corresponding element of ``data`` is masked.
Returns
-------
centroid : `~numpy.ndarray`
The ``x, y`` coordinates of the centroid. | Below is the the instruction that describes the task:
### Input:
Calculate the centroid of a 2D array by fitting a 2D Gaussian (plus
a constant) to the array.
Invalid values (e.g. NaNs or infs) in the ``data`` or ``error``
arrays are automatically masked. The mask for invalid values
represents the combination of the invalid-value masks for the
``data`` and ``error`` arrays.
Parameters
----------
data : array_like
The 2D data array.
error : array_like, optional
The 2D array of the 1-sigma errors of the input ``data``.
mask : array_like (bool), optional
A boolean mask, with the same shape as ``data``, where a `True`
value indicates the corresponding element of ``data`` is masked.
Returns
-------
centroid : `~numpy.ndarray`
The ``x, y`` coordinates of the centroid.
### Response:
def centroid_2dg(data, error=None, mask=None):
"""
Calculate the centroid of a 2D array by fitting a 2D Gaussian (plus
a constant) to the array.
Invalid values (e.g. NaNs or infs) in the ``data`` or ``error``
arrays are automatically masked. The mask for invalid values
represents the combination of the invalid-value masks for the
``data`` and ``error`` arrays.
Parameters
----------
data : array_like
The 2D data array.
error : array_like, optional
The 2D array of the 1-sigma errors of the input ``data``.
mask : array_like (bool), optional
A boolean mask, with the same shape as ``data``, where a `True`
value indicates the corresponding element of ``data`` is masked.
Returns
-------
centroid : `~numpy.ndarray`
The ``x, y`` coordinates of the centroid.
"""
gfit = fit_2dgaussian(data, error=error, mask=mask)
return np.array([gfit.x_mean.value, gfit.y_mean.value]) |
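A hedged usage sketch, assuming numpy and photutils are installed (recent photutils releases expose this function as photutils.centroids.centroid_2dg). The test image is a synthetic, noiseless Gaussian, so the recovered centroid should land near the coordinates used to build it.

import numpy as np
from photutils.centroids import centroid_2dg

# Build a 2D Gaussian centred at x=23.4, y=31.7 (arbitrary values for the demo).
y, x = np.mgrid[0:50, 0:50]
data = np.exp(-((x - 23.4) ** 2 + (y - 31.7) ** 2) / (2 * 3.0 ** 2))

print(centroid_2dg(data))   # approximately [23.4, 31.7]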
def project_closing(self, project):
"""
Called when a project is about to be closed.
:param project: Project instance
"""
yield from super().project_closing(project)
# delete the Dynamips devices corresponding to the project
tasks = []
for device in self._devices.values():
if device.project.id == project.id:
tasks.append(asyncio.async(device.delete()))
if tasks:
done, _ = yield from asyncio.wait(tasks)
for future in done:
try:
future.result()
except (Exception, GeneratorExit) as e:
log.error("Could not delete device {}".format(e), exc_info=1) | Called when a project is about to be closed.
:param project: Project instance | Below is the the instruction that describes the task:
### Input:
Called when a project is about to be closed.
:param project: Project instance
### Response:
def project_closing(self, project):
"""
Called when a project is about to be closed.
:param project: Project instance
"""
yield from super().project_closing(project)
# delete the Dynamips devices corresponding to the project
tasks = []
for device in self._devices.values():
if device.project.id == project.id:
tasks.append(asyncio.async(device.delete()))
if tasks:
done, _ = yield from asyncio.wait(tasks)
for future in done:
try:
future.result()
except (Exception, GeneratorExit) as e:
log.error("Could not delete device {}".format(e), exc_info=1) |
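The snippet uses the pre-Python-3.5 coroutine style (yield from, asyncio.async). A self-contained sketch of the same fan-out-and-check pattern with modern async/await, built around a made-up Device class, looks like this:

import asyncio

class Device:
    def __init__(self, name, project_id):
        self.name, self.project_id = name, project_id

    async def delete(self):
        await asyncio.sleep(0)                 # stand-in for the real cleanup work
        print('deleted', self.name)

async def close_project(devices, project_id):
    # One delete task per device that belongs to the closing project.
    tasks = [asyncio.ensure_future(d.delete())
             for d in devices if d.project_id == project_id]
    if tasks:
        done, _ = await asyncio.wait(tasks)
        for future in done:
            try:
                future.result()                # re-raises any exception from delete()
            except Exception as e:
                print('Could not delete device', e)

asyncio.run(close_project([Device('R1', 'p1'), Device('R2', 'p2')], 'p1'))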
def parse_dict_header(value):
"""Parse lists of key, value pairs as described by RFC 2068 Section 2 and
convert them into a python dict:
>>> d = parse_dict_header('foo="is a fish", bar="as well"')
>>> type(d) is dict
True
>>> sorted(d.items())
[('bar', 'as well'), ('foo', 'is a fish')]
If there is no value for a key it will be `None`:
>>> parse_dict_header('key_without_value')
{'key_without_value': None}
To create a header from the :class:`dict` again, use the
:func:`dump_header` function.
:param value: a string with a dict header.
:return: :class:`dict`
:rtype: dict
"""
result = {}
for item in _parse_list_header(value):
if '=' not in item:
result[item] = None
continue
name, value = item.split('=', 1)
if value[:1] == value[-1:] == '"':
value = unquote_header_value(value[1:-1])
result[name] = value
return result | Parse lists of key, value pairs as described by RFC 2068 Section 2 and
convert them into a python dict:
>>> d = parse_dict_header('foo="is a fish", bar="as well"')
>>> type(d) is dict
True
>>> sorted(d.items())
[('bar', 'as well'), ('foo', 'is a fish')]
If there is no value for a key it will be `None`:
>>> parse_dict_header('key_without_value')
{'key_without_value': None}
To create a header from the :class:`dict` again, use the
:func:`dump_header` function.
:param value: a string with a dict header.
:return: :class:`dict`
:rtype: dict | Below is the the instruction that describes the task:
### Input:
Parse lists of key, value pairs as described by RFC 2068 Section 2 and
convert them into a python dict:
>>> d = parse_dict_header('foo="is a fish", bar="as well"')
>>> type(d) is dict
True
>>> sorted(d.items())
[('bar', 'as well'), ('foo', 'is a fish')]
If there is no value for a key it will be `None`:
>>> parse_dict_header('key_without_value')
{'key_without_value': None}
To create a header from the :class:`dict` again, use the
:func:`dump_header` function.
:param value: a string with a dict header.
:return: :class:`dict`
:rtype: dict
### Response:
def parse_dict_header(value):
"""Parse lists of key, value pairs as described by RFC 2068 Section 2 and
convert them into a python dict:
>>> d = parse_dict_header('foo="is a fish", bar="as well"')
>>> type(d) is dict
True
>>> sorted(d.items())
[('bar', 'as well'), ('foo', 'is a fish')]
If there is no value for a key it will be `None`:
>>> parse_dict_header('key_without_value')
{'key_without_value': None}
To create a header from the :class:`dict` again, use the
:func:`dump_header` function.
:param value: a string with a dict header.
:return: :class:`dict`
:rtype: dict
"""
result = {}
for item in _parse_list_header(value):
if '=' not in item:
result[item] = None
continue
name, value = item.split('=', 1)
if value[:1] == value[-1:] == '"':
value = unquote_header_value(value[1:-1])
result[name] = value
return result |
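The helpers it calls (_parse_list_header, unquote_header_value) are not shown here, but packaged copies of the same function ship in requests.utils and werkzeug.http. A hedged usage example, assuming requests is installed, with an invented Digest-style header value:

from requests.utils import parse_dict_header

header = 'realm="example", nonce="abc123", qop="auth", stale'
print(parse_dict_header(header))
# {'realm': 'example', 'nonce': 'abc123', 'qop': 'auth', 'stale': None}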
def get_vprof_version(filename):
"""Returns actual version specified in filename."""
with open(filename) as src_file:
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
src_file.read(), re.M)
if version_match:
return version_match.group(1)
raise RuntimeError('Unable to find version info.') | Returns actual version specified in filename. | Below is the the instruction that describes the task:
### Input:
Returns actual version specified in filename.
### Response:
def get_vprof_version(filename):
"""Returns actual version specified in filename."""
with open(filename) as src_file:
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
src_file.read(), re.M)
if version_match:
return version_match.group(1)
raise RuntimeError('Unable to find version info.') |
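The regex is easy to try on a string directly; re.M makes ^ match at the start of every line rather than only at the start of the file. The sample module text and version below are made up.

import re

sample = "__author__ = 'example'\n__version__ = '0.4.1'\n"
match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", sample, re.M)
print(match.group(1))   # 0.4.1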
def handle_starting_instance(self):
"""Starting up PostgreSQL may take a long time. In case we are the leader we may want to
fail over to."""
# Check if we are in startup, when paused defer to main loop for manual failovers.
if not self.state_handler.check_for_startup() or self.is_paused():
self.set_start_timeout(None)
if self.is_paused():
self.state_handler.set_state(self.state_handler.is_running() and 'running' or 'stopped')
return None
# state_handler.state == 'starting' here
if self.has_lock():
if not self.update_lock():
logger.info("Lost lock while starting up. Demoting self.")
self.demote('immediate-nolock')
return 'stopped PostgreSQL while starting up because leader key was lost'
timeout = self._start_timeout or self.patroni.config['master_start_timeout']
time_left = timeout - self.state_handler.time_in_state()
if time_left <= 0:
if self.is_failover_possible(self.cluster.members):
logger.info("Demoting self because master startup is taking too long")
self.demote('immediate')
return 'stopped PostgreSQL because of startup timeout'
else:
return 'master start has timed out, but continuing to wait because failover is not possible'
else:
msg = self.process_manual_failover_from_leader()
if msg is not None:
return msg
return 'PostgreSQL is still starting up, {0:.0f} seconds until timeout'.format(time_left)
else:
# Use normal processing for standbys
logger.info("Still starting up as a standby.")
return None | Starting up PostgreSQL may take a long time. In case we are the leader we may want to
fail over to. | Below is the the instruction that describes the task:
### Input:
Starting up PostgreSQL may take a long time. In case we are the leader we may want to
fail over to.
### Response:
def handle_starting_instance(self):
"""Starting up PostgreSQL may take a long time. In case we are the leader we may want to
fail over to."""
# Check if we are in startup, when paused defer to main loop for manual failovers.
if not self.state_handler.check_for_startup() or self.is_paused():
self.set_start_timeout(None)
if self.is_paused():
self.state_handler.set_state(self.state_handler.is_running() and 'running' or 'stopped')
return None
# state_handler.state == 'starting' here
if self.has_lock():
if not self.update_lock():
logger.info("Lost lock while starting up. Demoting self.")
self.demote('immediate-nolock')
return 'stopped PostgreSQL while starting up because leader key was lost'
timeout = self._start_timeout or self.patroni.config['master_start_timeout']
time_left = timeout - self.state_handler.time_in_state()
if time_left <= 0:
if self.is_failover_possible(self.cluster.members):
logger.info("Demoting self because master startup is taking too long")
self.demote('immediate')
return 'stopped PostgreSQL because of startup timeout'
else:
return 'master start has timed out, but continuing to wait because failover is not possible'
else:
msg = self.process_manual_failover_from_leader()
if msg is not None:
return msg
return 'PostgreSQL is still starting up, {0:.0f} seconds until timeout'.format(time_left)
else:
# Use normal processing for standbys
logger.info("Still starting up as a standby.")
return None |
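A toy sketch of just the timeout arithmetic in the leader branch, with all numbers invented: once the time spent in the 'starting' state exceeds master_start_timeout, the leader demotes itself only if a failover target exists.

master_start_timeout = 300        # seconds the leader may spend starting up
time_in_state = 320               # seconds already spent in the 'starting' state
failover_possible = True          # is there a healthy member to fail over to?

time_left = master_start_timeout - time_in_state
if time_left > 0:
    print('PostgreSQL is still starting up, {:.0f} seconds until timeout'.format(time_left))
elif failover_possible:
    print('demoting self: master startup is taking too long')
else:
    print('start has timed out, but waiting because failover is not possible')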
def get_next_iteration(self, iteration, iteration_kwargs={}):
"""
BO-HB uses (just like Hyperband) SuccessiveHalving for each iteration.
See Li et al. (2016) for reference.
Parameters
----------
iteration: int
the index of the iteration to be instantiated
Returns
-------
SuccessiveHalving: the SuccessiveHalving iteration with the
corresponding number of configurations
"""
# number of 'SH rungs'
s = self.max_SH_iter - 1 - (iteration%self.max_SH_iter)
# number of configurations in that bracket
n0 = int(np.floor((self.max_SH_iter)/(s+1)) * self.eta**s)
ns = [max(int(n0*(self.eta**(-i))), 1) for i in range(s+1)]
return(SuccessiveHalving(HPB_iter=iteration, num_configs=ns, budgets=self.budgets[(-s-1):], config_sampler=self.config_generator.get_config, **iteration_kwargs)) | BO-HB uses (just like Hyperband) SuccessiveHalving for each iteration.
See Li et al. (2016) for reference.
Parameters
----------
iteration: int
the index of the iteration to be instantiated
Returns
-------
SuccessiveHalving: the SuccessiveHalving iteration with the
corresponding number of configurations | Below is the the instruction that describes the task:
### Input:
BO-HB uses (just like Hyperband) SuccessiveHalving for each iteration.
See Li et al. (2016) for reference.
Parameters
----------
iteration: int
the index of the iteration to be instantiated
Returns
-------
SuccessiveHalving: the SuccessiveHalving iteration with the
corresponding number of configurations
### Response:
def get_next_iteration(self, iteration, iteration_kwargs={}):
"""
BO-HB uses (just like Hyperband) SuccessiveHalving for each iteration.
See Li et al. (2016) for reference.
Parameters
----------
iteration: int
the index of the iteration to be instantiated
Returns
-------
SuccessiveHalving: the SuccessiveHalving iteration with the
corresponding number of configurations
"""
# number of 'SH rungs'
s = self.max_SH_iter - 1 - (iteration%self.max_SH_iter)
# number of configurations in that bracket
n0 = int(np.floor((self.max_SH_iter)/(s+1)) * self.eta**s)
ns = [max(int(n0*(self.eta**(-i))), 1) for i in range(s+1)]
return(SuccessiveHalving(HPB_iter=iteration, num_configs=ns, budgets=self.budgets[(-s-1):], config_sampler=self.config_generator.get_config, **iteration_kwargs)) |
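The bracket arithmetic is easy to check by hand. A standalone worked example with eta = 3, max_SH_iter = 4 and a matching geometric budget ladder (illustrative values, not taken from any particular run) prints the configuration counts and budgets for each iteration.

import numpy as np

eta, max_SH_iter = 3, 4            # illustrative values
budgets = [1, 3, 9, 27]            # geometric budgets, a factor of eta apart

for iteration in range(max_SH_iter):
    s = max_SH_iter - 1 - (iteration % max_SH_iter)          # number of halving steps
    n0 = int(np.floor(max_SH_iter / (s + 1)) * eta ** s)     # configs in the first rung
    ns = [max(int(n0 * eta ** (-i)), 1) for i in range(s + 1)]
    print(iteration, ns, budgets[(-s - 1):])

# 0 [27, 9, 3, 1] [1, 3, 9, 27]
# 1 [9, 3, 1] [3, 9, 27]
# 2 [6, 2] [9, 27]
# 3 [4] [27]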
def __related_categories(self, category_id):
""" Get all related categories to a given one """
related = []
for cat in self.categories_tree:
if category_id in self.categories_tree[cat]:
related.append(self.categories[cat])
return related | Get all related categories to a given one | Below is the the instruction that describes the task:
### Input:
Get all related categories to a given one
### Response:
def __related_categories(self, category_id):
""" Get all related categories to a given one """
related = []
for cat in self.categories_tree:
if category_id in self.categories_tree[cat]:
related.append(self.categories[cat])
return related |
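A tiny self-contained sketch of the same reverse lookup; the category ids and names are invented.

# categories maps id -> name, categories_tree maps id -> ids of related categories.
categories = {1: 'hardware', 2: 'software', 3: 'network'}
categories_tree = {1: [4, 5], 2: [5], 3: [6]}

def related_categories(category_id):
    # Collect the name of every category whose tree entry contains category_id.
    return [categories[cat] for cat in categories_tree
            if category_id in categories_tree[cat]]

print(related_categories(5))   # ['hardware', 'software']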