code | docstring | text
---|---|---
def _search(self, trie, strings, limit=None):
"""Search in cache
:param strings: list of strings to get from the cache
:type strings: str list
:param limit: limit search results
:type limit: int
:rtype: [Resource | Collection]
"""
results = [trie.has_keys_with_prefix(s) for s in strings]
if not any(results):
return []
for result, s in zip(results, strings):
if result is True:
return trie.values(s)[:limit] | Search in cache
:param strings: list of strings to get from the cache
:type strings: str list
:param limit: limit search results
:type limit: int
:rtype: [Resource | Collection] | Below is the instruction that describes the task:
### Input:
Search in cache
:param strings: list of strings to get from the cache
:type strings: str list
:param limit: limit search results
:type limit: int
:rtype: [Resource | Collection]
### Response:
def _search(self, trie, strings, limit=None):
"""Search in cache
:param strings: list of strings to get from the cache
:type strings: str list
:param limit: limit search results
:type limit: int
:rtype: [Resource | Collection]
"""
results = [trie.has_keys_with_prefix(s) for s in strings]
if not any(results):
return []
for result, s in zip(results, strings):
if result is True:
return trie.values(s)[:limit] |
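A minimal sketch (not part of the original cache code) of how _search behaves: a stand-in trie provides the two methods the function relies on, so the prefix lookup and the limit can be exercised without the real backend.
class _FakeTrie:
    def __init__(self, items):
        self._items = dict(items)  # full key -> cached object
    def has_keys_with_prefix(self, prefix):
        return any(key.startswith(prefix) for key in self._items)
    def values(self, prefix):
        return [value for key, value in sorted(self._items.items()) if key.startswith(prefix)]

trie = _FakeTrie({'/users/1': 'resource-1', '/users/2': 'resource-2', '/orders/7': 'resource-7'})
# With strings=['/missing', '/users'] and limit=1, _search returns the values of the
# first prefix that matches anything: ['resource-1'].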
def patch_traces(
self,
project_id,
traces,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Sends new traces to Stackdriver Trace or updates existing traces. If the ID
of a trace that you send matches that of an existing trace, any fields
in the existing trace and its spans are overwritten by the provided values,
and any new fields provided are merged with the existing trace data. If the
ID does not match, a new trace is created.
Example:
>>> from google.cloud import trace_v1
>>>
>>> client = trace_v1.TraceServiceClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `traces`:
>>> traces = {}
>>>
>>> client.patch_traces(project_id, traces)
Args:
project_id (str): ID of the Cloud project where the trace data is stored.
traces (Union[dict, ~google.cloud.trace_v1.types.Traces]): The body of the message.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.trace_v1.types.Traces`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "patch_traces" not in self._inner_api_calls:
self._inner_api_calls[
"patch_traces"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.patch_traces,
default_retry=self._method_configs["PatchTraces"].retry,
default_timeout=self._method_configs["PatchTraces"].timeout,
client_info=self._client_info,
)
request = trace_pb2.PatchTracesRequest(project_id=project_id, traces=traces)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("project_id", project_id)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
self._inner_api_calls["patch_traces"](
request, retry=retry, timeout=timeout, metadata=metadata
) | Sends new traces to Stackdriver Trace or updates existing traces. If the ID
of a trace that you send matches that of an existing trace, any fields
in the existing trace and its spans are overwritten by the provided values,
and any new fields provided are merged with the existing trace data. If the
ID does not match, a new trace is created.
Example:
>>> from google.cloud import trace_v1
>>>
>>> client = trace_v1.TraceServiceClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `traces`:
>>> traces = {}
>>>
>>> client.patch_traces(project_id, traces)
Args:
project_id (str): ID of the Cloud project where the trace data is stored.
traces (Union[dict, ~google.cloud.trace_v1.types.Traces]): The body of the message.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.trace_v1.types.Traces`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid. | Below is the instruction that describes the task:
### Input:
Sends new traces to Stackdriver Trace or updates existing traces. If the ID
of a trace that you send matches that of an existing trace, any fields
in the existing trace and its spans are overwritten by the provided values,
and any new fields provided are merged with the existing trace data. If the
ID does not match, a new trace is created.
Example:
>>> from google.cloud import trace_v1
>>>
>>> client = trace_v1.TraceServiceClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `traces`:
>>> traces = {}
>>>
>>> client.patch_traces(project_id, traces)
Args:
project_id (str): ID of the Cloud project where the trace data is stored.
traces (Union[dict, ~google.cloud.trace_v1.types.Traces]): The body of the message.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.trace_v1.types.Traces`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
### Response:
def patch_traces(
self,
project_id,
traces,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Sends new traces to Stackdriver Trace or updates existing traces. If the ID
of a trace that you send matches that of an existing trace, any fields
in the existing trace and its spans are overwritten by the provided values,
and any new fields provided are merged with the existing trace data. If the
ID does not match, a new trace is created.
Example:
>>> from google.cloud import trace_v1
>>>
>>> client = trace_v1.TraceServiceClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `traces`:
>>> traces = {}
>>>
>>> client.patch_traces(project_id, traces)
Args:
project_id (str): ID of the Cloud project where the trace data is stored.
traces (Union[dict, ~google.cloud.trace_v1.types.Traces]): The body of the message.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.trace_v1.types.Traces`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "patch_traces" not in self._inner_api_calls:
self._inner_api_calls[
"patch_traces"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.patch_traces,
default_retry=self._method_configs["PatchTraces"].retry,
default_timeout=self._method_configs["PatchTraces"].timeout,
client_info=self._client_info,
)
request = trace_pb2.PatchTracesRequest(project_id=project_id, traces=traces)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("project_id", project_id)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
self._inner_api_calls["patch_traces"](
request, retry=retry, timeout=timeout, metadata=metadata
) |
def mode(self, set_bytes):
"""Set the operating protocol of the USB-ISS with additional
parameters for the protocol
"""
self._mode = set_bytes
data = [self.ISS_CMD, self.ISS_SET_MODE] + set_bytes
self.write_data(data)
response = self.read_data(2)
if response[0] == 0:
error_dict = {
0x05: 'Unknown Command',
0x06: 'Internal Error 1',
0x07: 'Internal Error 2'
}
try:
raise USBISSError(error_dict[response[1]])
except KeyError:
raise USBISSError('Undocumented Error') | Set the operating protocol of the USB-ISS with additional
parameters for the protocol | Below is the instruction that describes the task:
### Input:
Set the operating protocol of the USB-ISS with additional
parameters for the protocol
### Response:
def mode(self, set_bytes):
"""Set the operating protocol of the USB-ISS with additional
parameters for the protocol
"""
self._mode = set_bytes
data = [self.ISS_CMD, self.ISS_SET_MODE] + set_bytes
self.write_data(data)
response = self.read_data(2)
if response[0] == 0:
error_dict = {
0x05: 'Unknown Command',
0x06: 'Internal Error 1',
0x07: 'Internal Error 2'
}
try:
raise USBISSError(error_dict[response[1]])
except KeyError:
raise USBISSError('Undocumented Error') |
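A small sketch (illustrative only, with USBISSError stubbed) of the error lookup above once the response is indexed with response[1]; unknown codes fall back to the generic message.
class USBISSError(Exception):
    """Stand-in for the driver's exception type."""

def raise_for_response(response):
    # response is a two-byte sequence: [status, error_code]
    error_dict = {0x05: 'Unknown Command', 0x06: 'Internal Error 1', 0x07: 'Internal Error 2'}
    if response[0] == 0:
        raise USBISSError(error_dict.get(response[1], 'Undocumented Error'))

# raise_for_response([0, 0x06]) raises USBISSError('Internal Error 1')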
def with_args(self, **kwargs):
"""Send these keyword-arguments to the phase when called."""
# Make a copy so we can have multiple of the same phase with different args
# in the same test.
new_info = mutablerecords.CopyRecord(self)
new_info.options = new_info.options.format_strings(**kwargs)
new_info.extra_kwargs.update(kwargs)
new_info.measurements = [m.with_args(**kwargs) for m in self.measurements]
return new_info | Send these keyword-arguments to the phase when called. | Below is the instruction that describes the task:
### Input:
Send these keyword-arguments to the phase when called.
### Response:
def with_args(self, **kwargs):
"""Send these keyword-arguments to the phase when called."""
# Make a copy so we can have multiple of the same phase with different args
# in the same test.
new_info = mutablerecords.CopyRecord(self)
new_info.options = new_info.options.format_strings(**kwargs)
new_info.extra_kwargs.update(kwargs)
new_info.measurements = [m.with_args(**kwargs) for m in self.measurements]
return new_info |
def plot_comp(df_var, fig=None, ax=None, **kwargs):
"""Short summary.
Parameters
----------
df_var : pd.DataFrame
DataFrame containing variables to plot with datetime as index
Returns
-------
MPL.figure
figure showing 1:1 line plot
"""
if fig is None and ax is None:
fig, ax = plt.subplots()
elif fig is None:
fig = ax.get_figure()
elif ax is None:
ax = fig.gca()
# plt.clf()
# plt.cla()
# ax = sns.regplot(
# x='Obs', y='Sim',
# data=df_var,
# fit_reg=True)
# add regression expression
df_var_fit = df_var.dropna(how='any')
# regr = linear_model.LinearRegression()
# val_x = df_var_fit['Obs'].values.reshape(-1, 1)
# val_y = df_var_fit['Sim'].values.reshape(-1, 1)
# regr.fit(val_x, val_y)
val_x = df_var_fit['Obs']
val_y = df_var_fit['Sim']
slope, intercept, r_value, p_value, std_err = stats.linregress(
val_x, val_y)
mae = (val_y - val_x).abs().mean()
sns.regplot(
x='Obs', y='Sim',
data=df_var,
ax=ax,
fit_reg=True,
line_kws={
'label': "y={0:.2f}x{1}{2:.2f}".format(slope, '+' if intercept>0 else '', intercept) +
'\n' + '$R^2$={0:.4f}'.format(r_value ** 2) +
'\n' + 'MAE={0:.2f}'.format(mae) +
'\n' + 'n={}'.format(df_var.shape[0])
},
**kwargs
)
# ax.plot(val_x, y_pred, color='red', linewidth=2,
# label='r2= ' + str("%.3f" % r2) + '\n' +
# 'y=' + str("%.3f" % a[0][0]) + 'x+' + str("%.2f" % b[0]))
# ax.legend(fontsize=15)
ax.legend()
# ax.set_title(var + '_' + title)
# set equal plotting range
x0, x1 = ax.get_xlim()
y0, y1 = ax.get_ylim()
lim_low, lim_high = np.min([x0, y0]), np.max([x1, y1])
ax.set_xlim(lim_low, lim_high)
ax.set_ylim(lim_low, lim_high)
# set 1:1 aspect ratio
ax.set_aspect('equal')
# add 1:1 line
ax.plot([lim_low, lim_high], [lim_low, lim_high],
color='red', linewidth=1, zorder=0)
# fig = ax.figure
return fig, ax | Short summary.
Parameters
----------
df_var : pd.DataFrame
DataFrame containing variables to plot with datetime as index
Returns
-------
MPL.figure
figure showing 1:1 line plot | Below is the instruction that describes the task:
### Input:
Short summary.
Parameters
----------
df_var : pd.DataFrame
DataFrame containing variables to plot with datetime as index
Returns
-------
MPL.figure
figure showing 1:1 line plot
### Response:
def plot_comp(df_var, fig=None, ax=None, **kwargs):
"""Short summary.
Parameters
----------
df_var : pd.DataFrame
DataFrame containing variables to plot with datetime as index
Returns
-------
MPL.figure
figure showing 1:1 line plot
"""
if fig is None and ax is None:
fig, ax = plt.subplots()
elif fig is None:
fig = ax.get_figure()
elif ax is None:
ax = fig.gca()
# plt.clf()
# plt.cla()
# ax = sns.regplot(
# x='Obs', y='Sim',
# data=df_var,
# fit_reg=True)
# add regression expression
df_var_fit = df_var.dropna(how='any')
# regr = linear_model.LinearRegression()
# val_x = df_var_fit['Obs'].values.reshape(-1, 1)
# val_y = df_var_fit['Sim'].values.reshape(-1, 1)
# regr.fit(val_x, val_y)
val_x = df_var_fit['Obs']
val_y = df_var_fit['Sim']
slope, intercept, r_value, p_value, std_err = stats.linregress(
val_x, val_y)
mae = (val_y - val_x).abs().mean()
sns.regplot(
x='Obs', y='Sim',
data=df_var,
ax=ax,
fit_reg=True,
line_kws={
'label': "y={0:.2f}x{1}{2:.2f}".format(slope, '+' if intercept>0 else '', intercept) +
'\n' + '$R^2$={0:.4f}'.format(r_value ** 2) +
'\n' + 'MAE={0:.2f}'.format(mae) +
'\n' + 'n={}'.format(df_var.shape[0])
},
**kwargs
)
# ax.plot(val_x, y_pred, color='red', linewidth=2,
# label='r2= ' + str("%.3f" % r2) + '\n' +
# 'y=' + str("%.3f" % a[0][0]) + 'x+' + str("%.2f" % b[0]))
# ax.legend(fontsize=15)
ax.legend()
# ax.set_title(var + '_' + title)
# set equal plotting range
x0, x1 = ax.get_xlim()
y0, y1 = ax.get_ylim()
lim_low, lim_high = np.min([x0, y0]), np.max([x1, y1])
ax.set_xlim(lim_low, lim_high)
ax.set_ylim(lim_low, lim_high)
# set 1:1 aspect ratio
ax.set_aspect('equal')
# add 1:1 line
ax.plot([lim_low, lim_high], [lim_low, lim_high],
color='red', linewidth=1, zorder=0)
# fig = ax.figure
return fig, ax |
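A short usage sketch with synthetic data (not from the original project), assuming plot_comp is importable from the module above; the DataFrame needs 'Obs' and 'Sim' columns indexed by datetime.
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
idx = pd.date_range('2021-01-01', periods=200, freq='H')
obs = rng.normal(10.0, 2.0, size=200)
df_var = pd.DataFrame({'Obs': obs, 'Sim': obs + rng.normal(0.0, 0.5, size=200)}, index=idx)

fig, ax = plot_comp(df_var)
fig.savefig('obs_vs_sim.png', dpi=150)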
def marvcli_query(ctx, list_tags, collections, discarded, outdated, path, tags, null):
"""Query datasets.
Use --collection=* to list all datasets across all collections.
"""
if not any([collections, discarded, list_tags, outdated, path, tags]):
click.echo(ctx.get_help())
ctx.exit(1)
sep = '\x00' if null else '\n'
site = create_app().site
if '*' in collections:
collections = None
else:
for col in collections:
if col not in site.collections:
ctx.fail('Unknown collection: {}'.format(col))
if list_tags:
tags = site.listtags(collections)
if tags:
click.echo(sep.join(tags), nl=not null)
else:
click.echo('no tags', err=True)
return
setids = site.query(collections, discarded, outdated, path, tags)
if setids:
sep = '\x00' if null else '\n'
click.echo(sep.join(setids), nl=not null) | Query datasets.
Use --collection=* to list all datasets across all collections. | Below is the instruction that describes the task:
### Input:
Query datasets.
Use --collection=* to list all datasets across all collections.
### Response:
def marvcli_query(ctx, list_tags, collections, discarded, outdated, path, tags, null):
"""Query datasets.
Use --collection=* to list all datasets across all collections.
"""
if not any([collections, discarded, list_tags, outdated, path, tags]):
click.echo(ctx.get_help())
ctx.exit(1)
sep = '\x00' if null else '\n'
site = create_app().site
if '*' in collections:
collections = None
else:
for col in collections:
if col not in site.collections:
ctx.fail('Unknown collection: {}'.format(col))
if list_tags:
tags = site.listtags(collections)
if tags:
click.echo(sep.join(tags), nl=not null)
else:
click.echo('no tags', err=True)
return
setids = site.query(collections, discarded, outdated, path, tags)
if setids:
sep = '\x00' if null else '\n'
click.echo(sep.join(setids), nl=not null) |
def get_ticker(self):
'''Return a ticker object. Generated from quote and trade.'''
lastQuote = self.data['quote'][-1]
lastTrade = self.data['trade'][-1]
ticker = {
"last": lastTrade['price'],
"buy": lastQuote['bidPrice'],
"sell": lastQuote['askPrice'],
"mid": (float(lastQuote['bidPrice'] or 0) + float(lastQuote['askPrice'] or 0)) / 2
}
# The instrument has a tickSize. Use it to round values.
instrument = self.data['instrument'][0]
return {k: round(float(v or 0), instrument['tickLog']) for k, v in list(ticker.items())} | Return a ticker object. Generated from quote and trade. | Below is the instruction that describes the task:
### Input:
Return a ticker object. Generated from quote and trade.
### Response:
def get_ticker(self):
'''Return a ticker object. Generated from quote and trade.'''
lastQuote = self.data['quote'][-1]
lastTrade = self.data['trade'][-1]
ticker = {
"last": lastTrade['price'],
"buy": lastQuote['bidPrice'],
"sell": lastQuote['askPrice'],
"mid": (float(lastQuote['bidPrice'] or 0) + float(lastQuote['askPrice'] or 0)) / 2
}
# The instrument has a tickSize. Use it to round values.
instrument = self.data['instrument'][0]
return {k: round(float(v or 0), instrument['tickLog']) for k, v in list(ticker.items())} |
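A sketch of the minimal self.data layout get_ticker reads (field names taken from the code above; values are illustrative only).
data = {
    'quote': [{'bidPrice': 100.5, 'askPrice': 101.0}],
    'trade': [{'price': 100.75}],
    'instrument': [{'tickLog': 2}],
}
# With this layout the method returns:
# {'last': 100.75, 'buy': 100.5, 'sell': 101.0, 'mid': 100.75}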
def get_port_profile_for_intf_output_interface_port_profile_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_port_profile_for_intf = ET.Element("get_port_profile_for_intf")
config = get_port_profile_for_intf
output = ET.SubElement(get_port_profile_for_intf, "output")
interface = ET.SubElement(output, "interface")
port_profile = ET.SubElement(interface, "port-profile")
name = ET.SubElement(port_profile, "name")
name.text = kwargs.pop('name')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def get_port_profile_for_intf_output_interface_port_profile_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_port_profile_for_intf = ET.Element("get_port_profile_for_intf")
config = get_port_profile_for_intf
output = ET.SubElement(get_port_profile_for_intf, "output")
interface = ET.SubElement(output, "interface")
port_profile = ET.SubElement(interface, "port-profile")
name = ET.SubElement(port_profile, "name")
name.text = kwargs.pop('name')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def get_rowspanned_columns(self):
"""Return a dictionary mapping column indices to the number of columns
spanned."""
spanned_columns = {}
current_row_index = self._index
current_row_cols = sum(cell.colspan for cell in self)
prev_rows = iter(reversed(self.section[:current_row_index]))
while current_row_cols < self.section.num_columns:
row = next(prev_rows)
min_rowspan = current_row_index - int(row._index)
if row.maximum_rowspan > min_rowspan:
for cell in (c for c in row if c.rowspan > min_rowspan):
col_index = int(cell.column_index)
spanned_columns[col_index] = cell.colspan
current_row_cols += cell.colspan
return spanned_columns | Return a dictionary mapping column indices to the number of columns
spanned. | Below is the instruction that describes the task:
### Input:
Return a dictionary mapping column indices to the number of columns
spanned.
### Response:
def get_rowspanned_columns(self):
"""Return a dictionary mapping column indices to the number of columns
spanned."""
spanned_columns = {}
current_row_index = self._index
current_row_cols = sum(cell.colspan for cell in self)
prev_rows = iter(reversed(self.section[:current_row_index]))
while current_row_cols < self.section.num_columns:
row = next(prev_rows)
min_rowspan = current_row_index - int(row._index)
if row.maximum_rowspan > min_rowspan:
for cell in (c for c in row if c.rowspan > min_rowspan):
col_index = int(cell.column_index)
spanned_columns[col_index] = cell.colspan
current_row_cols += cell.colspan
return spanned_columns |
def _repr_values(self):
"""Return values that are to be shown in repr string."""
def getattr_better(obj, field):
try:
return getattr(obj, field)
except AttributeError as e:
try:
return getattr(obj, '_' + field)
except AttributeError:
raise e
return (getattr_better(self, attr) for attr in self._repr_attributes) | Return values that are to be shown in repr string. | Below is the instruction that describes the task:
### Input:
Return values that are to be shown in repr string.
### Response:
def _repr_values(self):
"""Return values that are to be shown in repr string."""
def getattr_better(obj, field):
try:
return getattr(obj, field)
except AttributeError as e:
try:
return getattr(obj, '_' + field)
except AttributeError:
raise e
return (getattr_better(self, attr) for attr in self._repr_attributes) |
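A brief sketch (hypothetical class, assuming the helper above is available at module level) of how _repr_values is typically consumed by __repr__ via a _repr_attributes list; note the fallback to the underscore-prefixed attribute.
class Interval:
    _repr_attributes = ('start', 'end')

    def __init__(self, start, end):
        self._start = start  # resolved through the '_' + field fallback
        self.end = end

    def __repr__(self):
        return 'Interval({})'.format(', '.join(repr(v) for v in _repr_values(self)))

# repr(Interval(1, 5)) -> 'Interval(1, 5)'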
def ensure_text(str_or_bytes, encoding='utf-8'):
"""Ensures an input is a string, decoding if it is bytes.
"""
if not isinstance(str_or_bytes, six.text_type):
return str_or_bytes.decode(encoding)
return str_or_bytes | Ensures an input is a string, decoding if it is bytes. | Below is the instruction that describes the task:
### Input:
Ensures an input is a string, decoding if it is bytes.
### Response:
def ensure_text(str_or_bytes, encoding='utf-8'):
"""Ensures an input is a string, decoding if it is bytes.
"""
if not isinstance(str_or_bytes, six.text_type):
return str_or_bytes.decode(encoding)
return str_or_bytes |
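A quick illustration (requires six, as the helper does): bytes are decoded, text passes through unchanged.
assert ensure_text(b'caf\xc3\xa9') == 'café'
assert ensure_text('already text') == 'already text'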
def _all_add_or_modify_row(self, item_name, insert_dict, table, index=None, condition=None,
condvars=None,
flags=(ADD_ROW, MODIFY_ROW,)):
"""Adds or changes a row in a pytable.
:param item_name: Name of item, the row is about, only important for throwing errors.
:param insert_dict:
Dictionary of data that is about to be inserted into the pytables row.
:param table:
The table to insert or modify a row in
:param index:
Index of row to be modified. Instead of an index a search condition can be
used as well, see below.
:param condition:
Condition to search for in the table
:param condvars:
Variables for the search condition
:param flags:
Flags whether to add, modify, or remove a row in the table
"""
if len(flags) == 0:
# No flags means no-op
return
# You can only specify either an index or a condition not both
if index is not None and condition is not None:
raise ValueError('Please give either a condition or an index or none!')
elif condition is not None:
row_iterator = table.where(condition, condvars=condvars)
elif index is not None:
row_iterator = table.iterrows(index, index + 1)
else:
row_iterator = None
try:
row = next(row_iterator)
except TypeError:
row = None
except StopIteration:
row = None
# multiple_entries = []
if ((HDF5StorageService.MODIFY_ROW in flags or HDF5StorageService.ADD_ROW in flags) and
HDF5StorageService.REMOVE_ROW in flags):
# You cannot remove and modify or add at the same time
raise ValueError('You cannot add or modify and remove a row at the same time.')
if row is None and HDF5StorageService.ADD_ROW in flags:
# Here we add a new row
row = table.row
self._all_insert_into_row(row, insert_dict)
row.append()
elif row is not None and HDF5StorageService.MODIFY_ROW in flags:
# Here we modify an existing row
self._all_insert_into_row(row, insert_dict)
row.update()
elif HDF5StorageService.REMOVE_ROW in flags:
# Here we delete an existing row
if row is not None:
# Only delete if the row does exist otherwise we do not have to do anything
row_number = row.nrow
try:
table.remove_rows(start=row_number, stop=row_number+1)
except NotImplementedError:
pass
# We get here if we try to remove the last row of a table
# there is nothing we can do but keep it :-(
else:
raise ValueError('Something is wrong, you might not have found '
'a row, or your flags are not set appropriately')
self._all_kill_iterator(row_iterator)
table.flush()
if HDF5StorageService.REMOVE_ROW not in flags and row is None:
raise RuntimeError('Could not add or modify entries of `%s` in '
'table %s' % (item_name, table._v_name)) | Adds or changes a row in a pytable.
:param item_name: Name of item, the row is about, only important for throwing errors.
:param insert_dict:
Dictionary of data that is about to be inserted into the pytables row.
:param table:
The table to insert or modify a row in
:param index:
Index of row to be modified. Instead of an index a search condition can be
used as well, see below.
:param condition:
Condition to search for in the table
:param condvars:
Variables for the search condition
:param flags:
Flags whether to add, modify, or remove a row in the table | Below is the instruction that describes the task:
### Input:
Adds or changes a row in a pytable.
:param item_name: Name of item, the row is about, only important for throwing errors.
:param insert_dict:
Dictionary of data that is about to be inserted into the pytables row.
:param table:
The table to insert or modify a row in
:param index:
Index of row to be modified. Instead of an index a search condition can be
used as well, see below.
:param condition:
Condition to search for in the table
:param condvars:
Variables for the search condition
:param flags:
Flags whether to add, modify, or remove a row in the table
### Response:
def _all_add_or_modify_row(self, item_name, insert_dict, table, index=None, condition=None,
condvars=None,
flags=(ADD_ROW, MODIFY_ROW,)):
"""Adds or changes a row in a pytable.
:param item_name: Name of item, the row is about, only important for throwing errors.
:param insert_dict:
Dictionary of data that is about to be inserted into the pytables row.
:param table:
The table to insert or modify a row in
:param index:
Index of row to be modified. Instead of an index a search condition can be
used as well, see below.
:param condition:
Condition to search for in the table
:param condvars:
Variables for the search condition
:param flags:
Flags whether to add, modify, or remove a row in the table
"""
if len(flags) == 0:
# No flags means no-op
return
# You can only specify either an index or a condition not both
if index is not None and condition is not None:
raise ValueError('Please give either a condition or an index or none!')
elif condition is not None:
row_iterator = table.where(condition, condvars=condvars)
elif index is not None:
row_iterator = table.iterrows(index, index + 1)
else:
row_iterator = None
try:
row = next(row_iterator)
except TypeError:
row = None
except StopIteration:
row = None
# multiple_entries = []
if ((HDF5StorageService.MODIFY_ROW in flags or HDF5StorageService.ADD_ROW in flags) and
HDF5StorageService.REMOVE_ROW in flags):
# You cannot remove and modify or add at the same time
raise ValueError('You cannot add or modify and remove a row at the same time.')
if row is None and HDF5StorageService.ADD_ROW in flags:
# Here we add a new row
row = table.row
self._all_insert_into_row(row, insert_dict)
row.append()
elif row is not None and HDF5StorageService.MODIFY_ROW in flags:
# Here we modify an existing row
self._all_insert_into_row(row, insert_dict)
row.update()
elif HDF5StorageService.REMOVE_ROW in flags:
# Here we delete an existing row
if row is not None:
# Only delete if the row does exist otherwise we do not have to do anything
row_number = row.nrow
try:
table.remove_rows(start=row_number, stop=row_number+1)
except NotImplementedError:
pass
# We get here if we try to remove the last row of a table
# there is nothing we can do but keep it :-(
else:
raise ValueError('Something is wrong, you might not have found '
'a row, or your flags are not set appropriately')
self._all_kill_iterator(row_iterator)
table.flush()
if HDF5StorageService.REMOVE_ROW not in flags and row is None:
raise RuntimeError('Could not add or modify entries of `%s` in '
'table %s' % (item_name, table._v_name)) |
def to_list(i, use_keys=False):
'''
Converts items to a list.
:param i: Item to convert
* If `i` is ``None``, the result is an empty list
* If `i` is 'string', the result won't be \
``['s', 't', 'r',...]`` rather more like ``['string']``
* If `i` is a nested dictionary, the result will be a flattened list.
:param use_keys:
If i is a dictionary, use the keys instead of values
:returns:
All items in i as list
'''
from photon.util.system import shell_notify
if not i:
return []
if isinstance(i, str):
return [i]
if isinstance(i, list):
return i
if isinstance(i, dict):
res = list()
for e in i.keys() if use_keys else i.values():
res.append(to_list(e)) if isinstance(e, dict) else res.append(e)
return res
shell_notify('type for %s uncovered' % (i), state=True, more=type(i)) | Converts items to a list.
:param i: Item to convert
* If `i` is ``None``, the result is an empty list
* If `i` is 'string', the result won't be \
``['s', 't', 'r',...]`` rather more like ``['string']``
* If `i` is a nested dictionary, the result will be a flattened list.
:param use_keys:
If i is a dictionary, use the keys instead of values
:returns:
All items in i as list | Below is the instruction that describes the task:
### Input:
Converts items to a list.
:param i: Item to convert
* If `i` is ``None``, the result is an empty list
* If `i` is 'string', the result won't be \
``['s', 't', 'r',...]`` rather more like ``['string']``
* If `i` is a nested dictionary, the result will be a flattened list.
:param use_keys:
If i is a dictionary, use the keys instead of values
:returns:
All items in i as list
### Response:
def to_list(i, use_keys=False):
'''
Converts items to a list.
:param i: Item to convert
* If `i` is ``None``, the result is an empty list
* If `i` is 'string', the result won't be \
``['s', 't', 'r',...]`` rather more like ``['string']``
* If `i` is a nested dictionary, the result will be a flattened list.
:param use_keys:
If i is a dictionary, use the keys instead of values
:returns:
All items in i as list
'''
from photon.util.system import shell_notify
if not i:
return []
if isinstance(i, str):
return [i]
if isinstance(i, list):
return i
if isinstance(i, dict):
res = list()
for e in i.keys() if use_keys else i.values():
res.append(to_list(e)) if isinstance(e, dict) else res.append(e)
return res
shell_notify('type for %s uncovered' % (i), state=True, more=type(i)) |
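A short usage sketch (assuming the photon package is importable, since the helper imports shell_notify on every call); the last line shows that nested dictionaries come back as nested lists with this implementation.
to_list(None)                              # -> []
to_list('string')                          # -> ['string']
to_list({'a': 1, 'b': 2})                  # -> [1, 2]
to_list({'a': 1, 'b': 2}, use_keys=True)   # -> ['a', 'b']
to_list({'a': {'x': 1}, 'b': 2})           # -> [[1], 2]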
def check_sso_login(self, operator_email, request_id):
"""
Login to the CloudGenix API, and see if SAML SSO has occurred.
This function is used to check and see if SAML SSO has succeeded while waiting.
**Parameters:**
- **operator_email:** String with the username to log in with
- **request_id:** String containing the SAML 2.0 Request ID from previous login attempt.
**Returns:** Tuple (Boolean success, Token on success, JSON response on error.)
"""
data = {
"email": operator_email,
"requestId": request_id
}
# If debug is set..
api_logger.info('check_sso_login function:')
response = self._parent_class.post.login(data=data)
# If valid response, but no token.
if not response.cgx_content.get('x_auth_token'):
# no valid login yet.
return response
# update with token and region
auth_region = self._parent_class.parse_region(response)
self._parent_class.update_region_to_controller(auth_region)
self._parent_class.reparse_login_cookie_after_region_update(response)
return response | Login to the CloudGenix API, and see if SAML SSO has occurred.
This function is used to check and see if SAML SSO has succeeded while waiting.
**Parameters:**
- **operator_email:** String with the username to log in with
- **request_id:** String containing the SAML 2.0 Request ID from previous login attempt.
**Returns:** Tuple (Boolean success, Token on success, JSON response on error.) | Below is the instruction that describes the task:
### Input:
Login to the CloudGenix API, and see if SAML SSO has occurred.
This function is used to check and see if SAML SSO has succeeded while waiting.
**Parameters:**
- **operator_email:** String with the username to log in with
- **request_id:** String containing the SAML 2.0 Request ID from previous login attempt.
**Returns:** Tuple (Boolean success, Token on success, JSON response on error.)
### Response:
def check_sso_login(self, operator_email, request_id):
"""
Login to the CloudGenix API, and see if SAML SSO has occurred.
This function is used to check and see if SAML SSO has succeeded while waiting.
**Parameters:**
- **operator_email:** String with the username to log in with
- **request_id:** String containing the SAML 2.0 Request ID from previous login attempt.
**Returns:** Tuple (Boolean success, Token on success, JSON response on error.)
"""
data = {
"email": operator_email,
"requestId": request_id
}
# If debug is set..
api_logger.info('check_sso_login function:')
response = self._parent_class.post.login(data=data)
# If valid response, but no token.
if not response.cgx_content.get('x_auth_token'):
# no valid login yet.
return response
# update with token and region
auth_region = self._parent_class.parse_region(response)
self._parent_class.update_region_to_controller(auth_region)
self._parent_class.reparse_login_cookie_after_region_update(response)
return response |
def prep_message(msg):
"""
Add the size header
"""
if six.PY3:
msg_out = msg.as_string().encode("utf-8")
else:
msg_out = msg.as_string()
our_len = len(msg_out) + 4
size = struct.pack('>L', our_len)
# why the hell is this "bytes" on python3?
return size + msg_out | Add the size header | Below is the instruction that describes the task:
### Input:
Add the size header
### Response:
def prep_message(msg):
"""
Add the size header
"""
if six.PY3:
msg_out = msg.as_string().encode("utf-8")
else:
msg_out = msg.as_string()
our_len = len(msg_out) + 4
size = struct.pack('>L', our_len)
# why the hell is this "bytes" on python3?
return size + msg_out |
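A small round-trip sketch (illustrative only; prep_message itself needs six) showing how the 4-byte length prefix can be read back; the declared length counts the header plus the encoded message.
import struct
from email.message import Message

msg = Message()
msg.set_payload('hello')
framed = prep_message(msg)

(declared_len,) = struct.unpack('>L', framed[:4])
assert declared_len == len(framed)
body = framed[4:].decode('utf-8')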
def receive_message_batch(self, max_batch_size=None, on_message_received=None, timeout=0):
"""Receive a batch of messages. Messages returned in the batch have already been
accepted - if you wish to add logic to accept or reject messages based on custom
criteria, pass in a callback. This method will return as soon as some messages are
available rather than waiting to achieve a specific batch size, and therefore the
number of messages returned per call will vary up to the maximum allowed.
If the receive client is configured with `auto_complete=True` then the messages received
in the batch returned by this function will already be settled. Alternatively, if
`auto_complete=False`, then each message will need to be explicitly settled before
it expires and is released.
:param max_batch_size: The maximum number of messages that can be returned in
one call. This value cannot be larger than the prefetch value, and if not specified,
the prefetch value will be used.
:type max_batch_size: int
:param on_message_received: A callback to process messages as they arrive from the
service. It takes a single argument, a ~uamqp.message.Message object.
:type on_message_received: callable[~uamqp.message.Message]
:param timeout: A timeout in milliseconds for which to wait to receive any messages.
If no messages are received in this time, an empty list will be returned. If set to
0, the client will continue to wait until at least one message is received. The
default is 0.
:type timeout: int
"""
self._message_received_callback = on_message_received
max_batch_size = max_batch_size or self._prefetch
if max_batch_size > self._prefetch:
raise ValueError(
'Maximum batch size cannot be greater than the '
'connection link credit: {}'.format(self._prefetch))
timeout = self._counter.get_current_ms() + timeout if timeout else 0
expired = False
self._received_messages = self._received_messages or compat.queue.Queue()
self.open()
receiving = True
batch = []
while not self._received_messages.empty() and len(batch) < max_batch_size:
batch.append(self._received_messages.get())
self._received_messages.task_done()
if len(batch) >= max_batch_size:
return batch
while receiving and not expired and len(batch) < max_batch_size:
while receiving and self._received_messages.qsize() < max_batch_size:
if timeout and self._counter.get_current_ms() > timeout:
expired = True
break
before = self._received_messages.qsize()
receiving = self.do_work()
received = self._received_messages.qsize() - before
if self._received_messages.qsize() > 0 and received == 0:
# No new messages arrived, but we have some - so return what we have.
expired = True
break
while not self._received_messages.empty() and len(batch) < max_batch_size:
batch.append(self._received_messages.get())
self._received_messages.task_done()
return batch | Receive a batch of messages. Messages returned in the batch have already been
accepted - if you wish to add logic to accept or reject messages based on custom
criteria, pass in a callback. This method will return as soon as some messages are
available rather than waiting to achieve a specific batch size, and therefore the
number of messages returned per call will vary up to the maximum allowed.
If the receive client is configured with `auto_complete=True` then the messages received
in the batch returned by this function will already be settled. Alternatively, if
`auto_complete=False`, then each message will need to be explicitly settled before
it expires and is released.
:param max_batch_size: The maximum number of messages that can be returned in
one call. This value cannot be larger than the prefetch value, and if not specified,
the prefetch value will be used.
:type max_batch_size: int
:param on_message_received: A callback to process messages as they arrive from the
service. It takes a single argument, a ~uamqp.message.Message object.
:type on_message_received: callable[~uamqp.message.Message]
:param timeout: A timeout in milliseconds for which to wait to receive any messages.
If no messages are received in this time, an empty list will be returned. If set to
0, the client will continue to wait until at least one message is received. The
default is 0.
:type timeout: int | Below is the instruction that describes the task:
### Input:
Receive a batch of messages. Messages returned in the batch have already been
accepted - if you wish to add logic to accept or reject messages based on custom
criteria, pass in a callback. This method will return as soon as some messages are
available rather than waiting to achieve a specific batch size, and therefore the
number of messages returned per call will vary up to the maximum allowed.
If the receive client is configured with `auto_complete=True` then the messages received
in the batch returned by this function will already be settled. Alternatively, if
`auto_complete=False`, then each message will need to be explicitly settled before
it expires and is released.
:param max_batch_size: The maximum number of messages that can be returned in
one call. This value cannot be larger than the prefetch value, and if not specified,
the prefetch value will be used.
:type max_batch_size: int
:param on_message_received: A callback to process messages as they arrive from the
service. It takes a single argument, a ~uamqp.message.Message object.
:type on_message_received: callable[~uamqp.message.Message]
:param timeout: A timeout in milliseconds for which to wait to receive any messages.
If no messages are received in this time, an empty list will be returned. If set to
0, the client will continue to wait until at least one message is received. The
default is 0.
:type timeout: int
### Response:
def receive_message_batch(self, max_batch_size=None, on_message_received=None, timeout=0):
"""Receive a batch of messages. Messages returned in the batch have already been
accepted - if you wish to add logic to accept or reject messages based on custom
criteria, pass in a callback. This method will return as soon as some messages are
available rather than waiting to achieve a specific batch size, and therefore the
number of messages returned per call will vary up to the maximum allowed.
If the receive client is configured with `auto_complete=True` then the messages received
in the batch returned by this function will already be settled. Alternatively, if
`auto_complete=False`, then each message will need to be explicitly settled before
it expires and is released.
:param max_batch_size: The maximum number of messages that can be returned in
one call. This value cannot be larger than the prefetch value, and if not specified,
the prefetch value will be used.
:type max_batch_size: int
:param on_message_received: A callback to process messages as they arrive from the
service. It takes a single argument, a ~uamqp.message.Message object.
:type on_message_received: callable[~uamqp.message.Message]
:param timeout: A timeout in milliseconds for which to wait to receive any messages.
If no messages are received in this time, an empty list will be returned. If set to
0, the client will continue to wait until at least one message is received. The
default is 0.
:type timeout: int
"""
self._message_received_callback = on_message_received
max_batch_size = max_batch_size or self._prefetch
if max_batch_size > self._prefetch:
raise ValueError(
'Maximum batch size cannot be greater than the '
'connection link credit: {}'.format(self._prefetch))
timeout = self._counter.get_current_ms() + timeout if timeout else 0
expired = False
self._received_messages = self._received_messages or compat.queue.Queue()
self.open()
receiving = True
batch = []
while not self._received_messages.empty() and len(batch) < max_batch_size:
batch.append(self._received_messages.get())
self._received_messages.task_done()
if len(batch) >= max_batch_size:
return batch
while receiving and not expired and len(batch) < max_batch_size:
while receiving and self._received_messages.qsize() < max_batch_size:
if timeout and self._counter.get_current_ms() > timeout:
expired = True
break
before = self._received_messages.qsize()
receiving = self.do_work()
received = self._received_messages.qsize() - before
if self._received_messages.qsize() > 0 and received == 0:
# No new messages arrived, but we have some - so return what we have.
expired = True
break
while not self._received_messages.empty() and len(batch) < max_batch_size:
batch.append(self._received_messages.get())
self._received_messages.task_done()
return batch |
def lambda_function(f):
"""
Decorator for making error handling on AWS Lambda easier
"""
@functools.wraps(f)
def wrapper(event, context):
global _CURRENT_LAMBDA_CONTEXT
_CURRENT_LAMBDA_CONTEXT = context
try:
result = f(event, context)
return wait(lambda: result)
except:
cls, exc, trace = sys.exc_info()
report_exc_info((cls, exc, trace.tb_next))
wait()
raise
return wrapper | Decorator for making error handling on AWS Lambda easier | Below is the instruction that describes the task:
### Input:
Decorator for making error handling on AWS Lambda easier
### Response:
def lambda_function(f):
"""
Decorator for making error handling on AWS Lambda easier
"""
@functools.wraps(f)
def wrapper(event, context):
global _CURRENT_LAMBDA_CONTEXT
_CURRENT_LAMBDA_CONTEXT = context
try:
result = f(event, context)
return wait(lambda: result)
except:
cls, exc, trace = sys.exc_info()
report_exc_info((cls, exc, trace.tb_next))
wait()
raise
return wrapper |
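A minimal usage sketch (handler body is hypothetical): the decorator wraps a standard AWS Lambda entry point so that any unhandled exception is reported before being re-raised.
@lambda_function
def handler(event, context):
    # normal handler logic; exceptions raised here are reported, then re-raised
    return {'statusCode': 200, 'body': 'ok'}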
def decode(self, bytes):
"""Decodes the given bytes according to this AIT Argument
Definition.
"""
value = self.type.decode(bytes)
if self._enum is not None:
for name, val in self._enum.items():
if value == val:
value = name
break
return value | Decodes the given bytes according to this AIT Argument
Definition. | Below is the instruction that describes the task:
### Input:
Decodes the given bytes according to this AIT Argument
Definition.
### Response:
def decode(self, bytes):
"""Decodes the given bytes according to this AIT Argument
Definition.
"""
value = self.type.decode(bytes)
if self._enum is not None:
for name, val in self._enum.items():
if value == val:
value = name
break
return value |
def personality(self, category: str = 'mbti') -> Union[str, int]:
"""Generate a type of personality.
:param category: Category.
:return: Personality type.
:rtype: str or int
:Example:
ISFJ.
"""
mbtis = ('ISFJ', 'ISTJ', 'INFJ', 'INTJ',
'ISTP', 'ISFP', 'INFP', 'INTP',
'ESTP', 'ESFP', 'ENFP', 'ENTP',
'ESTJ', 'ESFJ', 'ENFJ', 'ENTJ')
if category.lower() == 'rheti':
return self.random.randint(1, 10)
return self.random.choice(mbtis) | Generate a type of personality.
:param category: Category.
:return: Personality type.
:rtype: str or int
:Example:
ISFJ. | Below is the instruction that describes the task:
### Input:
Generate a type of personality.
:param category: Category.
:return: Personality type.
:rtype: str or int
:Example:
ISFJ.
### Response:
def personality(self, category: str = 'mbti') -> Union[str, int]:
"""Generate a type of personality.
:param category: Category.
:return: Personality type.
:rtype: str or int
:Example:
ISFJ.
"""
mbtis = ('ISFJ', 'ISTJ', 'INFJ', 'INTJ',
'ISTP', 'ISFP', 'INFP', 'INTP',
'ESTP', 'ESFP', 'ENFP', 'ENTP',
'ESTJ', 'ESFJ', 'ENFJ', 'ENTJ')
if category.lower() == 'rheti':
return self.random.randint(1, 10)
return self.random.choice(mbtis) |
def configure_alias(graph, ns, mappings):
"""
Register Alias endpoints for a resource object.
"""
convention = AliasConvention(graph)
convention.configure(ns, mappings) | Register Alias endpoints for a resource object. | Below is the instruction that describes the task:
### Input:
Register Alias endpoints for a resource object.
### Response:
def configure_alias(graph, ns, mappings):
"""
Register Alias endpoints for a resource object.
"""
convention = AliasConvention(graph)
convention.configure(ns, mappings) |
def filter_service_by_host_bp_rule_label(label):
"""Filter for service
Filter on label
:param label: label to filter
:type label: str
:return: Filter
:rtype: bool
"""
def inner_filter(items):
"""Inner filter for service. Accept if label in service.host.labels"""
service = items["service"]
if service is None:
return False
host = items["hosts"].get(service.host)
if host is None:
return False
return label in host.labels
return inner_filter | Filter for service
Filter on label
:param label: label to filter
:type label: str
:return: Filter
:rtype: bool | Below is the instruction that describes the task:
### Input:
Filter for service
Filter on label
:param label: label to filter
:type label: str
:return: Filter
:rtype: bool
### Response:
def filter_service_by_host_bp_rule_label(label):
"""Filter for service
Filter on label
:param label: label to filter
:type label: str
:return: Filter
:rtype: bool
"""
def inner_filter(items):
"""Inner filter for service. Accept if label in service.host.labels"""
service = items["service"]
if service is None:
return False
host = items["hosts"].get(service.host)
if host is None:
return False
return label in host.labels
return inner_filter |
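A tiny sketch (stub objects, not the real monitoring types) showing the returned closure in use against an items mapping.
from collections import namedtuple

Host = namedtuple('Host', 'labels')
Service = namedtuple('Service', 'host')

accept_prod = filter_service_by_host_bp_rule_label('prod')
items = {'service': Service(host='web01'), 'hosts': {'web01': Host(labels=['prod', 'web'])}}
assert accept_prod(items) is True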
def calculateLocalElasticity(self, bp, frames=None, helical=False, unit='kT'):
r"""Calculate local elastic matrix or stiffness matrix for local DNA segment
.. note:: Here local DNA segment referred to less than 5 base-pair long.
In case of :ref:`base-step-image`: Shift (:math:`Dx`), Slide (:math:`Dy`), Rise (:math:`Dz`),
Tilt (:math:`\tau`), Roll (:math:`\rho`) and Twist (:math:`\omega`), following elastic matrix is calculated.
.. math::
\mathbf{K}_{base-step} = \begin{bmatrix}
K_{Dx} & K_{Dx,Dy} & K_{Dx,Dz} & K_{Dx,\tau} & K_{Dx,\rho} & K_{Dx,\omega} \\
K_{Dx,Dy} & K_{Dy} & K_{Dy,Dz} & K_{Dy,\tau} & K_{Dy,\rho} & K_{Dy,\omega} \\
K_{Dx,Dz} & K_{Dy,Dz} & K_{Dz} & K_{Dz,\tau} & K_{Dz,\rho} & K_{Dz,\omega} \\
K_{Dx,\tau} & K_{Dy,\tau} & K_{Dz,\tau} & K_{\tau} & K_{\tau, \rho} & K_{\tau,\omega} \\
K_{Dx,\rho} & K_{Dy,\rho} & K_{Dz,\rho} & K_{\tau, \rho} & K_{\rho} & K_{\rho,\omega} \\
K_{Dx,\omega} & K_{Dy,\omega} & K_{Dz,\omega} & K_{\tau, \omega} & K_{\rho, \omega} & K_{\omega} \\
\end{bmatrix}
In case of :ref:`helical-base-step-image`: x-displacement (:math:`dx`), y-displacement (:math:`dy`), h-rise (:math:`h`),
inclination (:math:`\eta`), tip (:math:`\theta`) and twist (:math:`\Omega`), following elastic matrix is calculated.
.. math::
\mathbf{K}_{helical-base-step} = \begin{bmatrix}
K_{dx} & K_{dx,dy} & K_{dx,h} & K_{dx,\eta} & K_{dx,\theta} & K_{dx,\Omega} \\
K_{dx,dy} & K_{dy} & K_{dy,h} & K_{dy,\eta} & K_{dy,\theta} & K_{dy,\Omega} \\
K_{dx,h} & K_{dy,h} & K_{h} & K_{h,\eta} & K_{h,\theta} & K_{h,\Omega} \\
K_{dx,\eta} & K_{dy,\eta} & K_{h,\eta} & K_{\eta} & K_{\eta, \theta} & K_{\eta,\Omega} \\
K_{dx,\theta} & K_{dy,\theta} & K_{h,\theta} & K_{\eta, \theta} & K_{\theta} & K_{\theta,\Omega} \\
K_{dx,\Omega} & K_{dy,\Omega} & K_{h,\Omega} & K_{\eta, \Omega} & K_{\theta, \Omega} & K_{\Omega} \\
\end{bmatrix}
Parameters
----------
bp : list
List of two base-steps forming the DNA segment.
For example: with ``bp=[5, 50]``, 5-50 base-step segment will be considered.
frames : list
List of two trajectory frames between which parameters will be extracted. It can be used to select portions
of the trajectory. For example, with ``frames=[100, 1000]``, 100th to 1000th frame of the trajectory will be
considered.
helical : bool
If ``helical=True``, elastic matrix for **helical base-step** parameters are calculated. Otherwise,
by default, elastic matrix for **base-step** parameters are calculated.
unit : str
Unit of energy. Allowed units are: ``'kT', 'kJ/mol' and 'kcal/mol'``.
Return
------
mean : numpy.ndarray
Value of parameters at which energy is zero. Minimum point on energy landscape.
if ``helical=False``
.. math::
\begin{bmatrix}
Dx_0 & Dy_0 & Dz_0 & \tau_0 & \rho_0 & \omega_0
\end{bmatrix}
if ``helical=True``
.. math::
\begin{bmatrix}
dx_0 & dy_0 & h_0 & \eta_0 & \theta_0 & \Omega_0
\end{bmatrix}
result : numpy.ndarray
Elastic matrix.
"""
acceptedUnit = ['kT', 'kJ/mol', 'kcal/mol']
if unit not in acceptedUnit:
raise ValueError(" {0} not accepted. Use any of the following: {1} ".format(unit, acceptedUnit))
frames = self._validateFrames(frames)
name = '{0}-{1}-{2}-{3}-local-{4}'.format(bp[0], bp[1], frames[0], frames[1], int(helical))
if bp[1]-bp[0]+1 > 4:
raise ValueError("Selected span {0} is larger than 4, and therefore, not recommended for local elasticity".format(bp[1]-bp[0]+1))
if name not in self.esMatrix:
time, array = self.extractLocalParameters(self.dna, bp, helical=helical, frames=frames)
mean = np.mean(array, axis = 1)
esMatrix = self.getElasticMatrix(array)
self.esMatrix[name] = esMatrix
self.minimumPoint[name] = mean
else:
esMatrix = self.esMatrix[name]
mean = self.minimumPoint[name]
if unit == 'kJ/mol':
result = 2.4946938107879997 * esMatrix # (1.38064852e-23 * 300 * 6.023e23 / 1000 ) kT.NA/1000
elif unit == 'kcal/mol':
result = 0.5962461306854684 * esMatrix # (1.38064852e-23 * 300 * 6.023e23 / 1000 / 4.184) kT.NA/1000
else:
result = esMatrix
return mean, result | r"""Calculate local elastic matrix or stiffness matrix for local DNA segment
.. note:: Here local DNA segment referred to less than 5 base-pair long.
In case of :ref:`base-step-image`: Shift (:math:`Dx`), Slide (:math:`Dy`), Rise (:math:`Dz`),
Tilt (:math:`\tau`), Roll (:math:`\rho`) and Twist (:math:`\omega`), following elastic matrix is calculated.
.. math::
\mathbf{K}_{base-step} = \begin{bmatrix}
K_{Dx} & K_{Dx,Dy} & K_{Dx,Dz} & K_{Dx,\tau} & K_{Dx,\rho} & K_{Dx,\omega} \\
K_{Dx,Dy} & K_{Dy} & K_{Dy,Dz} & K_{Dy,\tau} & K_{Dy,\rho} & K_{Dy,\omega} \\
K_{Dx,Dz} & K_{Dy,Dz} & K_{Dz} & K_{Dz,\tau} & K_{Dz,\rho} & K_{Dz,\omega} \\
K_{Dx,\tau} & K_{Dy,\tau} & K_{Dz,\tau} & K_{\tau} & K_{\tau, \rho} & K_{\tau,\omega} \\
K_{Dx,\rho} & K_{Dy,\rho} & K_{Dz,\rho} & K_{\tau, \rho} & K_{\rho} & K_{\rho,\omega} \\
K_{Dx,\omega} & K_{Dy,\omega} & K_{Dz,\omega} & K_{\tau, \omega} & K_{\rho, \omega} & K_{\omega} \\
\end{bmatrix}
In case of :ref:`helical-base-step-image`: x-displacement (:math:`dx`), y-displacement (:math:`dy`), h-rise (:math:`h`),
inclination (:math:`\eta`), tip (:math:`\theta`) and twist (:math:`\Omega`), following elastic matrix is calculated.
.. math::
\mathbf{K}_{helical-base-step} = \begin{bmatrix}
K_{dx} & K_{dx,dy} & K_{dx,h} & K_{dx,\eta} & K_{dx,\theta} & K_{dx,\Omega} \\
K_{dx,dy} & K_{dy} & K_{dy,h} & K_{dy,\eta} & K_{dy,\theta} & K_{dy,\Omega} \\
K_{dx,h} & K_{dy,h} & K_{h} & K_{h,\eta} & K_{h,\theta} & K_{h,\Omega} \\
K_{dx,\eta} & K_{dy,\eta} & K_{h,\eta} & K_{\eta} & K_{\eta, \theta} & K_{\eta,\Omega} \\
K_{dx,\theta} & K_{dy,\theta} & K_{h,\theta} & K_{\eta, \theta} & K_{\theta} & K_{\theta,\Omega} \\
K_{dx,\Omega} & K_{dy,\Omega} & K_{h,\Omega} & K_{\eta, \Omega} & K_{\theta, \Omega} & K_{\Omega} \\
\end{bmatrix}
Parameters
----------
bp : list
List of two base-steps forming the DNA segment.
For example: with ``bp=[5, 50]``, 5-50 base-step segment will be considered.
frames : list
List of two trajectory frames between which parameters will be extracted. It can be used to select portions
of the trajectory. For example, with ``frames=[100, 1000]``, 100th to 1000th frame of the trajectory will be
considered.
helical : bool
If ``helical=True``, elastic matrix for **helical base-step** parameters are calculated. Otherwise,
by default, elastic matrix for **base-step** parameters are calculated.
unit : str
Unit of energy. Allowed units are: ``'kT', 'kJ/mol' and 'kcal/mol'``.
Return
------
mean : numpy.ndarray
Value of parameters at which energy is zero. Minimum point on energy landscape.
if ``helical=False``
.. math::
\begin{bmatrix}
Dx_0 & Dy_0 & Dz_0 & \tau_0 & \rho_0 & \omega_0
\end{bmatrix}
if ``helical=True``
.. math::
\begin{bmatrix}
dx_0 & dy_0 & h_0 & \eta_0 & \theta_0 & \Omega_0
\end{bmatrix}
result : numpy.ndarray
Elastic matrix. | Below is the instruction that describes the task:
### Input:
r"""Calculate local elastic matrix or stiffness matrix for local DNA segment
.. note:: Here local DNA segment referred to less than 5 base-pair long.
In case of :ref:`base-step-image`: Shift (:math:`Dx`), Slide (:math:`Dy`), Rise (:math:`Dz`),
Tilt (:math:`\tau`), Roll (:math:`\rho`) and Twist (:math:`\omega`), following elastic matrix is calculated.
.. math::
\mathbf{K}_{base-step} = \begin{bmatrix}
K_{Dx} & K_{Dx,Dy} & K_{Dx,Dz} & K_{Dx,\tau} & K_{Dx,\rho} & K_{Dx,\omega} \\
K_{Dx,Dy} & K_{Dy} & K_{Dy,Dz} & K_{Dy,\tau} & K_{Dy,\rho} & K_{Dy,\omega} \\
K_{Dx,Dz} & K_{Dy,Dz} & K_{Dz} & K_{Dz,\tau} & K_{Dz,\rho} & K_{Dz,\omega} \\
K_{Dx,\tau} & K_{Dy,\tau} & K_{Dz,\tau} & K_{\tau} & K_{\tau, \rho} & K_{\tau,\omega} \\
K_{Dx,\rho} & K_{Dy,\rho} & K_{Dz,\rho} & K_{\tau, \rho} & K_{\rho} & K_{\rho,\omega} \\
K_{Dx,\omega} & K_{Dy,\omega} & K_{Dz,\omega} & K_{\tau, \omega} & K_{\rho, \omega} & K_{\omega} \\
\end{bmatrix}
In case of :ref:`helical-base-step-image`: x-displacement (:math:`dx`), y-displacement (:math:`dy`), h-rise (:math:`h`),
inclination (:math:`\eta`), tip (:math:`\theta`) and twist (:math:`\Omega`), following elastic matrix is calculated.
.. math::
\mathbf{K}_{helical-base-step} = \begin{bmatrix}
K_{dx} & K_{dx,dy} & K_{dx,h} & K_{dx,\eta} & K_{dx,\theta} & K_{dx,\Omega} \\
K_{dx,dy} & K_{dy} & K_{dy,h} & K_{dy,\eta} & K_{dy,\theta} & K_{dy,\Omega} \\
K_{dx,h} & K_{dy,h} & K_{h} & K_{h,\eta} & K_{h,\theta} & K_{h,\Omega} \\
K_{dx,\eta} & K_{dy,\eta} & K_{h,\eta} & K_{\eta} & K_{\eta, \theta} & K_{\eta,\Omega} \\
K_{dx,\theta} & K_{dy,\theta} & K_{h,\theta} & K_{\eta, \theta} & K_{\theta} & K_{\theta,\Omega} \\
K_{dx,\Omega} & K_{dy,\Omega} & K_{h,\Omega} & K_{\eta, \Omega} & K_{\theta, \Omega} & K_{\Omega} \\
\end{bmatrix}
Parameters
----------
bp : list
List of two base-steps forming the DNA segment.
For example: with ``bp=[5, 50]``, 5-50 base-step segment will be considered.
frames : list
List of two trajectory frames between which parameters will be extracted. It can be used to select portions
of the trajectory. For example, with ``frames=[100, 1000]``, 100th to 1000th frame of the trajectory will be
considered.
helical : bool
If ``helical=True``, elastic matrix for **helical base-step** parameters are calculated. Otherwise,
by default, elastic matrix for **base-step** parameters are calculated.
unit : str
Unit of energy. Allowed units are: ``'kT', 'kJ/mol' and 'kcal/mol'``.
Return
------
mean : numpy.ndarray
Value of parameters at which energy is zero. Minimum point on energy landscape.
if ``helical=False``
.. math::
\begin{bmatrix}
Dx_0 & Dy_0 & Dz_0 & \tau_0 & \rho_0 & \omega_0
\end{bmatrix}
if ``helical=True``
.. math::
\begin{bmatrix}
dx_0 & dy_0 & h_0 & \eta_0 & \theta_0 & \Omega_0
\end{bmatrix}
result : numpy.ndarray
Elastic matrix.
### Response:
def calculateLocalElasticity(self, bp, frames=None, helical=False, unit='kT'):
r"""Calculate local elastic matrix or stiffness matrix for local DNA segment
    .. note:: Here a local DNA segment refers to a segment less than 5 base-pairs long.
In case of :ref:`base-step-image`: Shift (:math:`Dx`), Slide (:math:`Dy`), Rise (:math:`Dz`),
Tilt (:math:`\tau`), Roll (:math:`\rho`) and Twist (:math:`\omega`), following elastic matrix is calculated.
.. math::
\mathbf{K}_{base-step} = \begin{bmatrix}
K_{Dx} & K_{Dx,Dy} & K_{Dx,Dz} & K_{Dx,\tau} & K_{Dx,\rho} & K_{Dx,\omega} \\
K_{Dx,Dy} & K_{Dy} & K_{Dy,Dz} & K_{Dy,\tau} & K_{Dy,\rho} & K_{Dy,\omega} \\
K_{Dx,Dz} & K_{Dy,Dz} & K_{Dz} & K_{Dz,\tau} & K_{Dz,\rho} & K_{Dz,\omega} \\
K_{Dx,\tau} & K_{Dy,\tau} & K_{Dz,\tau} & K_{\tau} & K_{\tau, \rho} & K_{\tau,\omega} \\
K_{Dx,\rho} & K_{Dy,\rho} & K_{Dz,\rho} & K_{\tau, \rho} & K_{\rho} & K_{\rho,\omega} \\
K_{Dx,\omega} & K_{Dy,\omega} & K_{Dz,\omega} & K_{\tau, \omega} & K_{\rho, \omega} & K_{\omega} \\
\end{bmatrix}
In case of :ref:`helical-base-step-image`: x-displacement (:math:`dx`), y-displacement (:math:`dy`), h-rise (:math:`h`),
inclination (:math:`\eta`), tip (:math:`\theta`) and twist (:math:`\Omega`), following elastic matrix is calculated.
.. math::
\mathbf{K}_{helical-base-step} = \begin{bmatrix}
K_{dx} & K_{dx,dy} & K_{dx,h} & K_{dx,\eta} & K_{dx,\theta} & K_{dx,\Omega} \\
K_{dx,dy} & K_{dy} & K_{dy,h} & K_{dy,\eta} & K_{dy,\theta} & K_{dy,\Omega} \\
K_{dx,h} & K_{dy,h} & K_{h} & K_{h,\eta} & K_{h,\theta} & K_{h,\Omega} \\
K_{dx,\eta} & K_{dy,\eta} & K_{h,\eta} & K_{\eta} & K_{\eta, \theta} & K_{\eta,\Omega} \\
K_{dx,\theta} & K_{dy,\theta} & K_{h,\theta} & K_{\eta, \theta} & K_{\theta} & K_{\theta,\Omega} \\
K_{dx,\Omega} & K_{dy,\Omega} & K_{h,\Omega} & K_{\eta, \Omega} & K_{\theta, \Omega} & K_{\Omega} \\
\end{bmatrix}
Parameters
----------
bp : list
List of two base-steps forming the DNA segment.
For example: with ``bp=[5, 50]``, 5-50 base-step segment will be considered.
frames : list
List of two trajectory frames between which parameters will be extracted. It can be used to select portions
of the trajectory. For example, with ``frames=[100, 1000]``, 100th to 1000th frame of the trajectory will be
considered.
helical : bool
If ``helical=True``, elastic matrix for **helical base-step** parameters are calculated. Otherwise,
by default, elastic matrix for **base-step** parameters are calculated.
unit : str
Unit of energy. Allowed units are: ``'kT', 'kJ/mol' and 'kcal/mol'``.
Return
------
mean : numpy.ndarray
Value of parameters at which energy is zero. Minimum point on energy landscape.
if ``helical=False``
.. math::
\begin{bmatrix}
Dx_0 & Dy_0 & Dz_0 & \tau_0 & \rho_0 & \omega_0
\end{bmatrix}
if ``helical=True``
.. math::
\begin{bmatrix}
dx_0 & dy_0 & h_0 & \eta_0 & \theta_0 & \Omega_0
\end{bmatrix}
result : numpy.ndarray
Elastic matrix.
"""
acceptedUnit = ['kT', 'kJ/mol', 'kcal/mol']
if unit not in acceptedUnit:
raise ValueError(" {0} not accepted. Use any of the following: {1} ".format(unit, acceptedUnit))
frames = self._validateFrames(frames)
name = '{0}-{1}-{2}-{3}-local-{4}'.format(bp[0], bp[1], frames[0], frames[1], int(helical))
if bp[1]-bp[0]+1 > 4:
raise ValueError("Selected span {0} is larger than 4, and therefore, not recommended for local elasticity".format(bp[1]-bp[0]+1))
if name not in self.esMatrix:
time, array = self.extractLocalParameters(self.dna, bp, helical=helical, frames=frames)
mean = np.mean(array, axis = 1)
esMatrix = self.getElasticMatrix(array)
self.esMatrix[name] = esMatrix
self.minimumPoint[name] = mean
else:
esMatrix = self.esMatrix[name]
mean = self.minimumPoint[name]
if unit == 'kJ/mol':
result = 2.4946938107879997 * esMatrix # (1.38064852e-23 * 300 * 6.023e23 / 1000 ) kT.NA/1000
elif unit == 'kcal/mol':
result = 0.5962461306854684 * esMatrix # (1.38064852e-23 * 300 * 6.023e23 / 1000 / 4.184) kT.NA/1000
else:
result = esMatrix
return mean, result |
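For context, the hard-coded factors 2.4946938... and 0.5962461... in the unit conversion above follow from kB*T*NA at the T = 300 K stated in the snippet's own comments. The short, self-contained check below only recomputes them for illustration:
kB = 1.38064852e-23   # Boltzmann constant, J/K (value from the snippet's comment)
T = 300.0             # temperature assumed by the snippet, K
NA = 6.023e23         # Avogadro's number as rounded in the snippet's comment, 1/mol
kT_to_kJ_per_mol = kB * T * NA / 1000.0        # ~2.4947, the 'kJ/mol' factor
kT_to_kcal_per_mol = kT_to_kJ_per_mol / 4.184  # ~0.5962, the 'kcal/mol' factor
print(kT_to_kJ_per_mol, kT_to_kcal_per_mol)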
def queue_manager_stats(self, queue_manager, tags):
"""
Get stats from the queue manager
"""
for mname, pymqi_value in iteritems(metrics.queue_manager_metrics()):
try:
m = queue_manager.inquire(pymqi_value)
mname = '{}.queue_manager.{}'.format(self.METRIC_PREFIX, mname)
self.gauge(mname, m, tags=tags)
self.service_check(self.QUEUE_MANAGER_SERVICE_CHECK, AgentCheck.OK, tags)
except pymqi.Error as e:
self.warning("Error getting queue manager stats: {}".format(e))
                self.service_check(self.QUEUE_MANAGER_SERVICE_CHECK, AgentCheck.CRITICAL, tags) | Get stats from the queue manager | Below is the instruction that describes the task:
### Input:
Get stats from the queue manager
### Response:
def queue_manager_stats(self, queue_manager, tags):
"""
Get stats from the queue manager
"""
for mname, pymqi_value in iteritems(metrics.queue_manager_metrics()):
try:
m = queue_manager.inquire(pymqi_value)
mname = '{}.queue_manager.{}'.format(self.METRIC_PREFIX, mname)
self.gauge(mname, m, tags=tags)
self.service_check(self.QUEUE_MANAGER_SERVICE_CHECK, AgentCheck.OK, tags)
except pymqi.Error as e:
self.warning("Error getting queue manager stats: {}".format(e))
self.service_check(self.QUEUE_MANAGER_SERVICE_CHECK, AgentCheck.CRITICAL, tags) |
def get_alignment_df(a_aln_seq, b_aln_seq, a_seq_id=None, b_seq_id=None):
"""Summarize two alignment strings in a dataframe.
Args:
a_aln_seq (str): Aligned sequence string
b_aln_seq (str): Aligned sequence string
a_seq_id (str): Optional ID of a_seq
b_seq_id (str): Optional ID of b_aln_seq
Returns:
DataFrame: a per-residue level annotation of the alignment
"""
if len(a_aln_seq) != len(b_aln_seq):
raise ValueError('Sequence lengths not equal - was an alignment run?')
if not a_seq_id:
a_seq_id = 'a_seq'
if not b_seq_id:
b_seq_id = 'b_seq'
a_aln_seq = ssbio.protein.sequence.utils.cast_to_str(a_aln_seq)
b_aln_seq = ssbio.protein.sequence.utils.cast_to_str(b_aln_seq)
a_idx = 1
b_idx = 1
appender = []
for i, (a,b) in enumerate(zip(a_aln_seq, b_aln_seq)):
to_append = {}
if a == b and a != '-' and b != '-':
aa_flag = 'match'
elif a != b and a == '-' and b != '-':
aa_flag = 'insertion'
elif a != b and a != '-' and b == '-':
aa_flag = 'deletion'
elif a != b and a != '-' and b == 'X':
aa_flag = 'unresolved'
elif a != b and b != '-' and a == 'X':
aa_flag = 'unresolved'
elif a != b and a != '-' and b != '-':
aa_flag = 'mutation'
to_append['id_a'] = a_seq_id
to_append['id_b'] = b_seq_id
to_append['type'] = aa_flag
if aa_flag == 'match' or aa_flag == 'unresolved' or aa_flag == 'mutation':
to_append['id_a_aa'] = a
to_append['id_a_pos'] = int(a_idx)
to_append['id_b_aa'] = b
to_append['id_b_pos'] = int(b_idx)
a_idx += 1
b_idx += 1
if aa_flag == 'deletion':
to_append['id_a_aa'] = a
to_append['id_a_pos'] = int(a_idx)
a_idx += 1
if aa_flag == 'insertion':
to_append['id_b_aa'] = b
to_append['id_b_pos'] = int(b_idx)
b_idx += 1
appender.append(to_append)
cols = ['id_a', 'id_b', 'type', 'id_a_aa', 'id_a_pos', 'id_b_aa', 'id_b_pos']
alignment_df = pd.DataFrame.from_records(appender, columns=cols)
alignment_df = alignment_df.fillna(value=np.nan)
return alignment_df | Summarize two alignment strings in a dataframe.
Args:
a_aln_seq (str): Aligned sequence string
b_aln_seq (str): Aligned sequence string
a_seq_id (str): Optional ID of a_seq
b_seq_id (str): Optional ID of b_aln_seq
Returns:
        DataFrame: a per-residue level annotation of the alignment | Below is the instruction that describes the task:
### Input:
Summarize two alignment strings in a dataframe.
Args:
a_aln_seq (str): Aligned sequence string
b_aln_seq (str): Aligned sequence string
a_seq_id (str): Optional ID of a_seq
b_seq_id (str): Optional ID of b_aln_seq
Returns:
DataFrame: a per-residue level annotation of the alignment
### Response:
def get_alignment_df(a_aln_seq, b_aln_seq, a_seq_id=None, b_seq_id=None):
"""Summarize two alignment strings in a dataframe.
Args:
a_aln_seq (str): Aligned sequence string
b_aln_seq (str): Aligned sequence string
a_seq_id (str): Optional ID of a_seq
b_seq_id (str): Optional ID of b_aln_seq
Returns:
DataFrame: a per-residue level annotation of the alignment
"""
if len(a_aln_seq) != len(b_aln_seq):
raise ValueError('Sequence lengths not equal - was an alignment run?')
if not a_seq_id:
a_seq_id = 'a_seq'
if not b_seq_id:
b_seq_id = 'b_seq'
a_aln_seq = ssbio.protein.sequence.utils.cast_to_str(a_aln_seq)
b_aln_seq = ssbio.protein.sequence.utils.cast_to_str(b_aln_seq)
a_idx = 1
b_idx = 1
appender = []
for i, (a,b) in enumerate(zip(a_aln_seq, b_aln_seq)):
to_append = {}
if a == b and a != '-' and b != '-':
aa_flag = 'match'
elif a != b and a == '-' and b != '-':
aa_flag = 'insertion'
elif a != b and a != '-' and b == '-':
aa_flag = 'deletion'
elif a != b and a != '-' and b == 'X':
aa_flag = 'unresolved'
elif a != b and b != '-' and a == 'X':
aa_flag = 'unresolved'
elif a != b and a != '-' and b != '-':
aa_flag = 'mutation'
to_append['id_a'] = a_seq_id
to_append['id_b'] = b_seq_id
to_append['type'] = aa_flag
if aa_flag == 'match' or aa_flag == 'unresolved' or aa_flag == 'mutation':
to_append['id_a_aa'] = a
to_append['id_a_pos'] = int(a_idx)
to_append['id_b_aa'] = b
to_append['id_b_pos'] = int(b_idx)
a_idx += 1
b_idx += 1
if aa_flag == 'deletion':
to_append['id_a_aa'] = a
to_append['id_a_pos'] = int(a_idx)
a_idx += 1
if aa_flag == 'insertion':
to_append['id_b_aa'] = b
to_append['id_b_pos'] = int(b_idx)
b_idx += 1
appender.append(to_append)
cols = ['id_a', 'id_b', 'type', 'id_a_aa', 'id_a_pos', 'id_b_aa', 'id_b_pos']
alignment_df = pd.DataFrame.from_records(appender, columns=cols)
alignment_df = alignment_df.fillna(value=np.nan)
return alignment_df |
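A minimal usage sketch for get_alignment_df follows; the aligned strings are invented, and pandas, numpy and ssbio must be importable for the function itself to run:
# Two pre-aligned sequences of equal length; '-' marks a gap, 'X' an unresolved residue.
a_aln = "MK-AVQL"
b_aln = "MKAXV-L"
df = get_alignment_df(a_aln, b_aln, a_seq_id="seqA", b_seq_id="seqB")
print(df[["type", "id_a_pos", "id_b_pos"]])
# Expect 'match' rows plus one 'insertion', one 'unresolved' and one 'deletion'.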
def parse_args(spectypes):
"""
Return arguments object formed by parsing the command line used to launch
the program.
"""
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument(
"-c", "--constants",
help="emit constants instead of spec dict",
action="store_true"
)
arg_parser.add_argument(
"spectype",
help="specifies the spec type to be generated",
choices=spectypes
)
return arg_parser.parse_args() | Return arguments object formed by parsing the command line used to launch
    the program. | Below is the instruction that describes the task:
### Input:
Return arguments object formed by parsing the command line used to launch
the program.
### Response:
def parse_args(spectypes):
"""
Return arguments object formed by parsing the command line used to launch
the program.
"""
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument(
"-c", "--constants",
help="emit constants instead of spec dict",
action="store_true"
)
arg_parser.add_argument(
"spectype",
help="specifies the spec type to be generated",
choices=spectypes
)
return arg_parser.parse_args() |
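A small driver sketch for parse_args; the spec-type names are invented, and the script is meant to be invoked from a shell, for example as python gen.py -c client:
if __name__ == "__main__":
    args = parse_args(["client", "server"])   # hypothetical spec types
    print(args.spectype, args.constants)      # 'client' and True for the invocation above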
def vis_keypoints(img, kps, kp_thresh=2, alpha=0.7):
"""Visualizes keypoints (adapted from vis_one_image).
kps has shape (4, #keypoints) where 4 rows are (x, y, logit, prob).
"""
dataset_keypoints = PersonKeypoints.NAMES
kp_lines = PersonKeypoints.CONNECTIONS
# Convert from plt 0-1 RGBA colors to 0-255 BGR colors for opencv.
cmap = plt.get_cmap('rainbow')
colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]
colors = [(c[2] * 255, c[1] * 255, c[0] * 255) for c in colors]
# Perform the drawing on a copy of the image, to allow for blending.
kp_mask = np.copy(img)
# Draw mid shoulder / mid hip first for better visualization.
mid_shoulder = (
kps[:2, dataset_keypoints.index('right_shoulder')] +
kps[:2, dataset_keypoints.index('left_shoulder')]) / 2.0
sc_mid_shoulder = np.minimum(
kps[2, dataset_keypoints.index('right_shoulder')],
kps[2, dataset_keypoints.index('left_shoulder')])
mid_hip = (
kps[:2, dataset_keypoints.index('right_hip')] +
kps[:2, dataset_keypoints.index('left_hip')]) / 2.0
sc_mid_hip = np.minimum(
kps[2, dataset_keypoints.index('right_hip')],
kps[2, dataset_keypoints.index('left_hip')])
nose_idx = dataset_keypoints.index('nose')
if sc_mid_shoulder > kp_thresh and kps[2, nose_idx] > kp_thresh:
cv2.line(
kp_mask, tuple(mid_shoulder), tuple(kps[:2, nose_idx]),
color=colors[len(kp_lines)], thickness=2, lineType=cv2.LINE_AA)
if sc_mid_shoulder > kp_thresh and sc_mid_hip > kp_thresh:
cv2.line(
kp_mask, tuple(mid_shoulder), tuple(mid_hip),
color=colors[len(kp_lines) + 1], thickness=2, lineType=cv2.LINE_AA)
# Draw the keypoints.
for l in range(len(kp_lines)):
i1 = kp_lines[l][0]
i2 = kp_lines[l][1]
p1 = kps[0, i1], kps[1, i1]
p2 = kps[0, i2], kps[1, i2]
if kps[2, i1] > kp_thresh and kps[2, i2] > kp_thresh:
cv2.line(
kp_mask, p1, p2,
color=colors[l], thickness=2, lineType=cv2.LINE_AA)
if kps[2, i1] > kp_thresh:
cv2.circle(
kp_mask, p1,
radius=3, color=colors[l], thickness=-1, lineType=cv2.LINE_AA)
if kps[2, i2] > kp_thresh:
cv2.circle(
kp_mask, p2,
radius=3, color=colors[l], thickness=-1, lineType=cv2.LINE_AA)
# Blend the keypoints.
return cv2.addWeighted(img, 1.0 - alpha, kp_mask, alpha, 0) | Visualizes keypoints (adapted from vis_one_image).
    kps has shape (4, #keypoints) where 4 rows are (x, y, logit, prob). | Below is the instruction that describes the task:
### Input:
Visualizes keypoints (adapted from vis_one_image).
kps has shape (4, #keypoints) where 4 rows are (x, y, logit, prob).
### Response:
def vis_keypoints(img, kps, kp_thresh=2, alpha=0.7):
"""Visualizes keypoints (adapted from vis_one_image).
kps has shape (4, #keypoints) where 4 rows are (x, y, logit, prob).
"""
dataset_keypoints = PersonKeypoints.NAMES
kp_lines = PersonKeypoints.CONNECTIONS
# Convert from plt 0-1 RGBA colors to 0-255 BGR colors for opencv.
cmap = plt.get_cmap('rainbow')
colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]
colors = [(c[2] * 255, c[1] * 255, c[0] * 255) for c in colors]
# Perform the drawing on a copy of the image, to allow for blending.
kp_mask = np.copy(img)
# Draw mid shoulder / mid hip first for better visualization.
mid_shoulder = (
kps[:2, dataset_keypoints.index('right_shoulder')] +
kps[:2, dataset_keypoints.index('left_shoulder')]) / 2.0
sc_mid_shoulder = np.minimum(
kps[2, dataset_keypoints.index('right_shoulder')],
kps[2, dataset_keypoints.index('left_shoulder')])
mid_hip = (
kps[:2, dataset_keypoints.index('right_hip')] +
kps[:2, dataset_keypoints.index('left_hip')]) / 2.0
sc_mid_hip = np.minimum(
kps[2, dataset_keypoints.index('right_hip')],
kps[2, dataset_keypoints.index('left_hip')])
nose_idx = dataset_keypoints.index('nose')
if sc_mid_shoulder > kp_thresh and kps[2, nose_idx] > kp_thresh:
cv2.line(
kp_mask, tuple(mid_shoulder), tuple(kps[:2, nose_idx]),
color=colors[len(kp_lines)], thickness=2, lineType=cv2.LINE_AA)
if sc_mid_shoulder > kp_thresh and sc_mid_hip > kp_thresh:
cv2.line(
kp_mask, tuple(mid_shoulder), tuple(mid_hip),
color=colors[len(kp_lines) + 1], thickness=2, lineType=cv2.LINE_AA)
# Draw the keypoints.
for l in range(len(kp_lines)):
i1 = kp_lines[l][0]
i2 = kp_lines[l][1]
p1 = kps[0, i1], kps[1, i1]
p2 = kps[0, i2], kps[1, i2]
if kps[2, i1] > kp_thresh and kps[2, i2] > kp_thresh:
cv2.line(
kp_mask, p1, p2,
color=colors[l], thickness=2, lineType=cv2.LINE_AA)
if kps[2, i1] > kp_thresh:
cv2.circle(
kp_mask, p1,
radius=3, color=colors[l], thickness=-1, lineType=cv2.LINE_AA)
if kps[2, i2] > kp_thresh:
cv2.circle(
kp_mask, p2,
radius=3, color=colors[l], thickness=-1, lineType=cv2.LINE_AA)
# Blend the keypoints.
return cv2.addWeighted(img, 1.0 - alpha, kp_mask, alpha, 0) |
def throw_invalid_quad_params(quad, QUADS, nparams):
""" Exception raised when an invalid number of params in the
    quad code has been emitted.
"""
raise InvalidICError(str(quad),
"Invalid quad code params for '%s' (expected %i, but got %i)" %
(quad, QUADS[quad][0], nparams)
) | Exception raised when an invalid number of params in the
        quad code has been emitted. | Below is the instruction that describes the task:
### Input:
Exception raised when an invalid number of params in the
        quad code has been emitted.
### Response:
def throw_invalid_quad_params(quad, QUADS, nparams):
""" Exception raised when an invalid number of params in the
    quad code has been emitted.
"""
raise InvalidICError(str(quad),
"Invalid quad code params for '%s' (expected %i, but got %i)" %
(quad, QUADS[quad][0], nparams)
) |
def create_index(idx_url, clean=False):
"""Configure the index to work with"""
try:
r = requests.get(idx_url)
except requests.exceptions.ConnectionError:
cause = "Error connecting to Elastic Search (index: %s)" % idx_url
raise ElasticSearchError(cause=cause)
if r.status_code != 200:
# The index does not exist
r = requests.put(idx_url)
if r.status_code != 200:
logger.info("Can't create index %s (%s)", idx_url, r.status_code)
cause = "Error creating Elastic Search index %s" % idx_url
raise ElasticSearchError(cause=cause)
logger.info("Index %s created", idx_url)
return True
elif r.status_code == 200 and clean:
requests.delete(idx_url)
requests.put(idx_url)
logger.info("Index deleted and created (index: %s)", idx_url)
return True
    return False | Configure the index to work with | Below is the instruction that describes the task:
### Input:
Configure the index to work with
### Response:
def create_index(idx_url, clean=False):
"""Configure the index to work with"""
try:
r = requests.get(idx_url)
except requests.exceptions.ConnectionError:
cause = "Error connecting to Elastic Search (index: %s)" % idx_url
raise ElasticSearchError(cause=cause)
if r.status_code != 200:
# The index does not exist
r = requests.put(idx_url)
if r.status_code != 200:
logger.info("Can't create index %s (%s)", idx_url, r.status_code)
cause = "Error creating Elastic Search index %s" % idx_url
raise ElasticSearchError(cause=cause)
logger.info("Index %s created", idx_url)
return True
elif r.status_code == 200 and clean:
requests.delete(idx_url)
requests.put(idx_url)
logger.info("Index deleted and created (index: %s)", idx_url)
return True
return False |
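A usage sketch for create_index; the URL is a placeholder and a reachable Elasticsearch instance (plus the requests library) is assumed:
idx_url = "http://localhost:9200/demo_index"   # hypothetical index URL
try:
    created = create_index(idx_url, clean=False)
    print("index created" if created else "index already existed")
except ElasticSearchError as err:
    print("Elasticsearch problem:", err)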
def sample_rwalk(args):
"""
Return a new live point proposed by random walking away from an
existing live point.
Parameters
----------
u : `~numpy.ndarray` with shape (npdim,)
Position of the initial sample. **This is a copy of an existing live
point.**
loglstar : float
Ln(likelihood) bound.
axes : `~numpy.ndarray` with shape (ndim, ndim)
Axes used to propose new points. For random walks new positions are
proposed using the :class:`~dynesty.bounding.Ellipsoid` whose
shape is defined by axes.
scale : float
Value used to scale the provided axes.
prior_transform : function
        Function transforming a sample from the unit cube to the parameter
space of interest according to the prior.
loglikelihood : function
Function returning ln(likelihood) given parameters as a 1-d `~numpy`
array of length `ndim`.
kwargs : dict
A dictionary of additional method-specific parameters.
Returns
-------
u : `~numpy.ndarray` with shape (npdim,)
Position of the final proposed point within the unit cube.
v : `~numpy.ndarray` with shape (ndim,)
Position of the final proposed point in the target parameter space.
logl : float
Ln(likelihood) of the final proposed point.
nc : int
Number of function calls used to generate the sample.
blob : dict
Collection of ancillary quantities used to tune :data:`scale`.
"""
# Unzipping.
(u, loglstar, axes, scale,
prior_transform, loglikelihood, kwargs) = args
rstate = np.random
# Periodicity.
nonperiodic = kwargs.get('nonperiodic', None)
# Setup.
n = len(u)
walks = kwargs.get('walks', 25) # number of steps
accept = 0
reject = 0
fail = 0
nfail = 0
nc = 0
ncall = 0
drhat, dr, du, u_prop, logl_prop = np.nan, np.nan, np.nan, np.nan, np.nan
while nc < walks or accept == 0:
while True:
# Check scale-factor.
if scale == 0.:
raise RuntimeError("The random walk sampling is stuck! "
"Some useful output quantities:\n"
"u: {0}\n"
"drhat: {1}\n"
"dr: {2}\n"
"du: {3}\n"
"u_prop: {4}\n"
"loglstar: {5}\n"
"logl_prop: {6}\n"
"axes: {7}\n"
"scale: {8}."
.format(u, drhat, dr, du, u_prop,
loglstar, logl_prop, axes, scale))
# Propose a direction on the unit n-sphere.
drhat = rstate.randn(n)
drhat /= linalg.norm(drhat)
# Scale based on dimensionality.
dr = drhat * rstate.rand()**(1./n)
# Transform to proposal distribution.
du = np.dot(axes, dr)
u_prop = u + scale * du
# Check unit cube constraints.
if unitcheck(u_prop, nonperiodic):
break
else:
fail += 1
nfail += 1
# Check if we're stuck generating bad numbers.
if fail > 100 * walks:
warnings.warn("Random number generation appears to be "
"extremely inefficient. Adjusting the "
"scale-factor accordingly.")
fail = 0
scale *= math.exp(-1. / n)
# Check proposed point.
v_prop = prior_transform(np.array(u_prop))
logl_prop = loglikelihood(np.array(v_prop))
if logl_prop >= loglstar:
u = u_prop
v = v_prop
logl = logl_prop
accept += 1
else:
reject += 1
nc += 1
ncall += 1
# Check if we're stuck generating bad points.
if nc > 50 * walks:
scale *= math.exp(-1. / n)
warnings.warn("Random walk proposals appear to be "
"extremely inefficient. Adjusting the "
"scale-factor accordingly.")
nc, accept, reject = 0, 0, 0 # reset values
blob = {'accept': accept, 'reject': reject, 'fail': nfail, 'scale': scale}
return u, v, logl, ncall, blob | Return a new live point proposed by random walking away from an
existing live point.
Parameters
----------
u : `~numpy.ndarray` with shape (npdim,)
Position of the initial sample. **This is a copy of an existing live
point.**
loglstar : float
Ln(likelihood) bound.
axes : `~numpy.ndarray` with shape (ndim, ndim)
Axes used to propose new points. For random walks new positions are
proposed using the :class:`~dynesty.bounding.Ellipsoid` whose
shape is defined by axes.
scale : float
Value used to scale the provided axes.
prior_transform : function
        Function transforming a sample from the unit cube to the parameter
space of interest according to the prior.
loglikelihood : function
Function returning ln(likelihood) given parameters as a 1-d `~numpy`
array of length `ndim`.
kwargs : dict
A dictionary of additional method-specific parameters.
Returns
-------
u : `~numpy.ndarray` with shape (npdim,)
Position of the final proposed point within the unit cube.
v : `~numpy.ndarray` with shape (ndim,)
Position of the final proposed point in the target parameter space.
logl : float
Ln(likelihood) of the final proposed point.
nc : int
Number of function calls used to generate the sample.
blob : dict
        Collection of ancillary quantities used to tune :data:`scale`. | Below is the instruction that describes the task:
### Input:
Return a new live point proposed by random walking away from an
existing live point.
Parameters
----------
u : `~numpy.ndarray` with shape (npdim,)
Position of the initial sample. **This is a copy of an existing live
point.**
loglstar : float
Ln(likelihood) bound.
axes : `~numpy.ndarray` with shape (ndim, ndim)
Axes used to propose new points. For random walks new positions are
proposed using the :class:`~dynesty.bounding.Ellipsoid` whose
shape is defined by axes.
scale : float
Value used to scale the provided axes.
prior_transform : function
        Function transforming a sample from the unit cube to the parameter
space of interest according to the prior.
loglikelihood : function
Function returning ln(likelihood) given parameters as a 1-d `~numpy`
array of length `ndim`.
kwargs : dict
A dictionary of additional method-specific parameters.
Returns
-------
u : `~numpy.ndarray` with shape (npdim,)
Position of the final proposed point within the unit cube.
v : `~numpy.ndarray` with shape (ndim,)
Position of the final proposed point in the target parameter space.
logl : float
Ln(likelihood) of the final proposed point.
nc : int
Number of function calls used to generate the sample.
blob : dict
Collection of ancillary quantities used to tune :data:`scale`.
### Response:
def sample_rwalk(args):
"""
Return a new live point proposed by random walking away from an
existing live point.
Parameters
----------
u : `~numpy.ndarray` with shape (npdim,)
Position of the initial sample. **This is a copy of an existing live
point.**
loglstar : float
Ln(likelihood) bound.
axes : `~numpy.ndarray` with shape (ndim, ndim)
Axes used to propose new points. For random walks new positions are
proposed using the :class:`~dynesty.bounding.Ellipsoid` whose
shape is defined by axes.
scale : float
Value used to scale the provided axes.
prior_transform : function
        Function transforming a sample from the unit cube to the parameter
space of interest according to the prior.
loglikelihood : function
Function returning ln(likelihood) given parameters as a 1-d `~numpy`
array of length `ndim`.
kwargs : dict
A dictionary of additional method-specific parameters.
Returns
-------
u : `~numpy.ndarray` with shape (npdim,)
Position of the final proposed point within the unit cube.
v : `~numpy.ndarray` with shape (ndim,)
Position of the final proposed point in the target parameter space.
logl : float
Ln(likelihood) of the final proposed point.
nc : int
Number of function calls used to generate the sample.
blob : dict
Collection of ancillary quantities used to tune :data:`scale`.
"""
# Unzipping.
(u, loglstar, axes, scale,
prior_transform, loglikelihood, kwargs) = args
rstate = np.random
# Periodicity.
nonperiodic = kwargs.get('nonperiodic', None)
# Setup.
n = len(u)
walks = kwargs.get('walks', 25) # number of steps
accept = 0
reject = 0
fail = 0
nfail = 0
nc = 0
ncall = 0
drhat, dr, du, u_prop, logl_prop = np.nan, np.nan, np.nan, np.nan, np.nan
while nc < walks or accept == 0:
while True:
# Check scale-factor.
if scale == 0.:
raise RuntimeError("The random walk sampling is stuck! "
"Some useful output quantities:\n"
"u: {0}\n"
"drhat: {1}\n"
"dr: {2}\n"
"du: {3}\n"
"u_prop: {4}\n"
"loglstar: {5}\n"
"logl_prop: {6}\n"
"axes: {7}\n"
"scale: {8}."
.format(u, drhat, dr, du, u_prop,
loglstar, logl_prop, axes, scale))
# Propose a direction on the unit n-sphere.
drhat = rstate.randn(n)
drhat /= linalg.norm(drhat)
# Scale based on dimensionality.
dr = drhat * rstate.rand()**(1./n)
# Transform to proposal distribution.
du = np.dot(axes, dr)
u_prop = u + scale * du
# Check unit cube constraints.
if unitcheck(u_prop, nonperiodic):
break
else:
fail += 1
nfail += 1
# Check if we're stuck generating bad numbers.
if fail > 100 * walks:
warnings.warn("Random number generation appears to be "
"extremely inefficient. Adjusting the "
"scale-factor accordingly.")
fail = 0
scale *= math.exp(-1. / n)
# Check proposed point.
v_prop = prior_transform(np.array(u_prop))
logl_prop = loglikelihood(np.array(v_prop))
if logl_prop >= loglstar:
u = u_prop
v = v_prop
logl = logl_prop
accept += 1
else:
reject += 1
nc += 1
ncall += 1
# Check if we're stuck generating bad points.
if nc > 50 * walks:
scale *= math.exp(-1. / n)
warnings.warn("Random walk proposals appear to be "
"extremely inefficient. Adjusting the "
"scale-factor accordingly.")
nc, accept, reject = 0, 0, 0 # reset values
blob = {'accept': accept, 'reject': reject, 'fail': nfail, 'scale': scale}
return u, v, logl, ncall, blob |
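To make the proposal step above concrete, here is a standalone numpy sketch of how one random-walk candidate is drawn inside an ellipsoid defined by axes; the numbers are arbitrary and nothing below depends on dynesty itself:
import numpy as np
rng = np.random
n = 3                                  # dimensionality
u = np.full(n, 0.5)                    # current point in the unit cube
axes = np.eye(n) * 0.1                 # toy ellipsoid axes
scale = 1.0
drhat = rng.randn(n)
drhat /= np.linalg.norm(drhat)         # random direction on the unit n-sphere
dr = drhat * rng.rand() ** (1.0 / n)   # radius chosen so points fill the ball uniformly
du = np.dot(axes, dr)                  # map the unit ball into the ellipsoid
u_prop = u + scale * du                # candidate, still to be checked against the unit cube and loglstar
print(u_prop)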
def SmartStubAdapter(host='localhost', port=443, path='/sdk',
url=None, sock=None, poolSize=5,
certFile=None, certKeyFile=None,
httpProxyHost=None, httpProxyPort=80, sslProxyPath=None,
thumbprint=None, cacertsFile=None, preferredApiVersions=None,
acceptCompressedResponses=True,
connectionPoolTimeout=CONNECTION_POOL_IDLE_TIMEOUT_SEC,
samlToken=None, sslContext=None):
"""
Determine the most preferred API version supported by the specified server,
then create a soap stub adapter using that version
The parameters are the same as for pyVmomi.SoapStubAdapter except for
    version which is renamed to preferredApiVersions
@param preferredApiVersions: Acceptable API version(s) (e.g. vim.version.version3)
If a list of versions is specified the versions should
be ordered from most to least preferred. If None is
specified, the list of versions support by pyVmomi will
be used.
@type preferredApiVersions: string or string list
"""
if preferredApiVersions is None:
preferredApiVersions = GetServiceVersions('vim25')
sslContext = localSslFixup(host, sslContext)
supportedVersion = __FindSupportedVersion('https' if port > 0 else 'http',
host,
port,
path,
preferredApiVersions,
sslContext)
if supportedVersion is None:
raise Exception("%s:%s is not a VIM server" % (host, port))
return SoapStubAdapter(host=host, port=port, path=path,
url=url, sock=sock, poolSize=poolSize,
certFile=certFile, certKeyFile=certKeyFile,
httpProxyHost=httpProxyHost, httpProxyPort=httpProxyPort,
sslProxyPath=sslProxyPath, thumbprint=thumbprint,
cacertsFile=cacertsFile, version=supportedVersion,
acceptCompressedResponses=acceptCompressedResponses,
connectionPoolTimeout=connectionPoolTimeout,
samlToken=samlToken, sslContext=sslContext) | Determine the most preferred API version supported by the specified server,
then create a soap stub adapter using that version
The parameters are the same as for pyVmomi.SoapStubAdapter except for
    version which is renamed to preferredApiVersions
@param preferredApiVersions: Acceptable API version(s) (e.g. vim.version.version3)
If a list of versions is specified the versions should
be ordered from most to least preferred. If None is
specified, the list of versions support by pyVmomi will
be used.
    @type preferredApiVersions: string or string list | Below is the instruction that describes the task:
### Input:
Determine the most preferred API version supported by the specified server,
then create a soap stub adapter using that version
The parameters are the same as for pyVmomi.SoapStubAdapter except for
    version which is renamed to preferredApiVersions
@param preferredApiVersions: Acceptable API version(s) (e.g. vim.version.version3)
If a list of versions is specified the versions should
be ordered from most to least preferred. If None is
specified, the list of versions support by pyVmomi will
be used.
@type preferredApiVersions: string or string list
### Response:
def SmartStubAdapter(host='localhost', port=443, path='/sdk',
url=None, sock=None, poolSize=5,
certFile=None, certKeyFile=None,
httpProxyHost=None, httpProxyPort=80, sslProxyPath=None,
thumbprint=None, cacertsFile=None, preferredApiVersions=None,
acceptCompressedResponses=True,
connectionPoolTimeout=CONNECTION_POOL_IDLE_TIMEOUT_SEC,
samlToken=None, sslContext=None):
"""
Determine the most preferred API version supported by the specified server,
then create a soap stub adapter using that version
The parameters are the same as for pyVmomi.SoapStubAdapter except for
    version which is renamed to preferredApiVersions
@param preferredApiVersions: Acceptable API version(s) (e.g. vim.version.version3)
If a list of versions is specified the versions should
be ordered from most to least preferred. If None is
specified, the list of versions support by pyVmomi will
be used.
@type preferredApiVersions: string or string list
"""
if preferredApiVersions is None:
preferredApiVersions = GetServiceVersions('vim25')
sslContext = localSslFixup(host, sslContext)
supportedVersion = __FindSupportedVersion('https' if port > 0 else 'http',
host,
port,
path,
preferredApiVersions,
sslContext)
if supportedVersion is None:
raise Exception("%s:%s is not a VIM server" % (host, port))
return SoapStubAdapter(host=host, port=port, path=path,
url=url, sock=sock, poolSize=poolSize,
certFile=certFile, certKeyFile=certKeyFile,
httpProxyHost=httpProxyHost, httpProxyPort=httpProxyPort,
sslProxyPath=sslProxyPath, thumbprint=thumbprint,
cacertsFile=cacertsFile, version=supportedVersion,
acceptCompressedResponses=acceptCompressedResponses,
connectionPoolTimeout=connectionPoolTimeout,
samlToken=samlToken, sslContext=sslContext) |
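A connection sketch for SmartStubAdapter; the host is a placeholder, the unverified SSL context is for lab use only, and pyVmomi plus a reachable vCenter/ESXi endpoint are assumed:
import ssl
context = ssl._create_unverified_context()    # skips certificate checks; do not use in production
stub = SmartStubAdapter(host="vcenter.example.com", port=443, path="/sdk",
                        sslContext=context)
# The stub would then typically be wrapped, e.g. vim.ServiceInstance('ServiceInstance', stub),
# before authenticating a session.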
def request(self, name, content={}, namespace=None):
""" Do a SOAP request and returns the result.
        Simple wrapper around pythonzimbra functions
:param name: ex: 'Auth' for performing an 'AuthRequest'
:param content: a dict formatted pythonzimbra-style for request
:param namespace: (optional), the namespace, if different from the
client's
:returns: a dict with response
"""
if not namespace:
namespace = self.NAMESPACE
req_name = name+'Request'
resp_name = name+'Response'
req = pythonzimbra.request_xml.RequestXml()
resp = pythonzimbra.response_xml.ResponseXml()
if self._session.is_logged_in():
req.set_auth_token(self._session.authToken)
req.add_request(req_name, content, namespace)
try:
self.com.send_request(req, resp)
except HTTPError as e:
if resp:
raise ZimbraSoapServerError(e.req, e.resp)
else:
raise
try:
resp_content = resp.get_response()
return resp_content[resp_name]
except KeyError:
if 'Fault' in resp_content:
raise ZimbraSoapServerError(req, resp)
raise ZimbraSoapUnexpectedResponse(
req, resp, 'Cannot find {} in response "{}"'.format(
resp_name, resp.get_response()))
return resp_content | Do a SOAP request and returns the result.
        Simple wrapper around pythonzimbra functions
:param name: ex: 'Auth' for performing an 'AuthRequest'
:param content: a dict formatted pythonzimbra-style for request
:param namespace: (optional), the namespace, if different from the
client's
        :returns: a dict with response | Below is the instruction that describes the task:
### Input:
Do a SOAP request and returns the result.
        Simple wrapper around pythonzimbra functions
:param name: ex: 'Auth' for performing an 'AuthRequest'
:param content: a dict formatted pythonzimbra-style for request
:param namespace: (optional), the namespace, if different from the
client's
:returns: a dict with response
### Response:
def request(self, name, content={}, namespace=None):
""" Do a SOAP request and returns the result.
        Simple wrapper around pythonzimbra functions
:param name: ex: 'Auth' for performing an 'AuthRequest'
:param content: a dict formatted pythonzimbra-style for request
:param namespace: (optional), the namespace, if different from the
client's
:returns: a dict with response
"""
if not namespace:
namespace = self.NAMESPACE
req_name = name+'Request'
resp_name = name+'Response'
req = pythonzimbra.request_xml.RequestXml()
resp = pythonzimbra.response_xml.ResponseXml()
if self._session.is_logged_in():
req.set_auth_token(self._session.authToken)
req.add_request(req_name, content, namespace)
try:
self.com.send_request(req, resp)
except HTTPError as e:
if resp:
raise ZimbraSoapServerError(e.req, e.resp)
else:
raise
try:
resp_content = resp.get_response()
return resp_content[resp_name]
except KeyError:
if 'Fault' in resp_content:
raise ZimbraSoapServerError(req, resp)
raise ZimbraSoapUnexpectedResponse(
req, resp, 'Cannot find {} in response "{}"'.format(
resp_name, resp.get_response()))
return resp_content |
def GetCountStopTimes(self):
"""Return the number of stops made by this trip."""
cursor = self._schedule._connection.cursor()
cursor.execute(
'SELECT count(*) FROM stop_times WHERE trip_id=?', (self.trip_id,))
    return cursor.fetchone()[0] | Return the number of stops made by this trip. | Below is the instruction that describes the task:
### Input:
Return the number of stops made by this trip.
### Response:
def GetCountStopTimes(self):
"""Return the number of stops made by this trip."""
cursor = self._schedule._connection.cursor()
cursor.execute(
'SELECT count(*) FROM stop_times WHERE trip_id=?', (self.trip_id,))
return cursor.fetchone()[0] |
def load_friends(self):
"""Fetches the MAL user friends page and sets the current user's friends attributes.
:rtype: :class:`.User`
:return: Current user object.
"""
user_friends = self.session.session.get(u'http://myanimelist.net/profile/' + utilities.urlencode(self.username) + u'/friends').text
self.set(self.parse_friends(utilities.get_clean_dom(user_friends)))
return self | Fetches the MAL user friends page and sets the current user's friends attributes.
:rtype: :class:`.User`
    :return: Current user object. | Below is the instruction that describes the task:
### Input:
Fetches the MAL user friends page and sets the current user's friends attributes.
:rtype: :class:`.User`
:return: Current user object.
### Response:
def load_friends(self):
"""Fetches the MAL user friends page and sets the current user's friends attributes.
:rtype: :class:`.User`
:return: Current user object.
"""
user_friends = self.session.session.get(u'http://myanimelist.net/profile/' + utilities.urlencode(self.username) + u'/friends').text
self.set(self.parse_friends(utilities.get_clean_dom(user_friends)))
return self |
def do_import(self):
"""Handle import files or streams passed with '-i'."""
tmp_import = None
try:
if self.args[0].startswith('@') and self.args[0] != '@-':
import_file = os.path.expanduser(self.args[0][1:])
if not os.path.isfile(import_file):
self.parser.error("File not found (or not a file): {}".format(import_file))
args = (xmlrpc.NOHASH, os.path.abspath(import_file))
else:
script_text = '\n'.join(self.args + [''])
if script_text == '@-\n':
script_text = sys.stdin.read()
with tempfile.NamedTemporaryFile(suffix='.rc', prefix='rtxmlrpc-', delete=False) as handle:
handle.write(script_text)
tmp_import = handle.name
args = (xmlrpc.NOHASH, tmp_import)
self.execute(self.open(), 'import', args)
finally:
if tmp_import and os.path.exists(tmp_import):
                os.remove(tmp_import) | Handle import files or streams passed with '-i'. | Below is the instruction that describes the task:
### Input:
Handle import files or streams passed with '-i'.
### Response:
def do_import(self):
"""Handle import files or streams passed with '-i'."""
tmp_import = None
try:
if self.args[0].startswith('@') and self.args[0] != '@-':
import_file = os.path.expanduser(self.args[0][1:])
if not os.path.isfile(import_file):
self.parser.error("File not found (or not a file): {}".format(import_file))
args = (xmlrpc.NOHASH, os.path.abspath(import_file))
else:
script_text = '\n'.join(self.args + [''])
if script_text == '@-\n':
script_text = sys.stdin.read()
with tempfile.NamedTemporaryFile(suffix='.rc', prefix='rtxmlrpc-', delete=False) as handle:
handle.write(script_text)
tmp_import = handle.name
args = (xmlrpc.NOHASH, tmp_import)
self.execute(self.open(), 'import', args)
finally:
if tmp_import and os.path.exists(tmp_import):
os.remove(tmp_import) |
def _get_private_key(cls, private_key_path, private_key_passphrase):
"""Get Snowflake private key by path or None."""
if private_key_path is None or private_key_passphrase is None:
return None
with open(private_key_path, 'rb') as key:
p_key = serialization.load_pem_private_key(
key.read(),
password=private_key_passphrase.encode(),
backend=default_backend())
return p_key.private_bytes(
encoding=serialization.Encoding.DER,
format=serialization.PrivateFormat.PKCS8,
            encryption_algorithm=serialization.NoEncryption()) | Get Snowflake private key by path or None. | Below is the instruction that describes the task:
### Input:
Get Snowflake private key by path or None.
### Response:
def _get_private_key(cls, private_key_path, private_key_passphrase):
"""Get Snowflake private key by path or None."""
if private_key_path is None or private_key_passphrase is None:
return None
with open(private_key_path, 'rb') as key:
p_key = serialization.load_pem_private_key(
key.read(),
password=private_key_passphrase.encode(),
backend=default_backend())
return p_key.private_bytes(
encoding=serialization.Encoding.DER,
format=serialization.PrivateFormat.PKCS8,
encryption_algorithm=serialization.NoEncryption()) |
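For illustration, one way to produce an encrypted PKCS#8 PEM key that the helper above could consume; the path and passphrase are invented, and the helper is assumed to be exposed on its hook class:
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
key = rsa.generate_private_key(public_exponent=65537, key_size=2048,
                               backend=default_backend())
pem = key.private_bytes(
    encoding=serialization.Encoding.PEM,
    format=serialization.PrivateFormat.PKCS8,
    encryption_algorithm=serialization.BestAvailableEncryption(b"s3cret"),
)
with open("/tmp/snowflake_key.p8", "wb") as fh:
    fh.write(pem)
# Calling the helper with ("/tmp/snowflake_key.p8", "s3cret") would then return DER-encoded bytes.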
def generate_code(meta, prefix=None, node=False, min=False):
"""
Generate res.js
Args:
meta: tuple(url_prefix, auth_header, resources) or metadata of API
Returns:
res.js source code
"""
if isinstance(meta, dict):
url_prefix, auth_header, resources = parse_meta(meta)
else:
url_prefix, auth_header, resources = meta
if prefix is not None:
url_prefix = prefix
core = render_core(url_prefix, auth_header, resources)
if min:
filename = 'res.web.min.js'
else:
filename = 'res.web.js'
if node:
filename = 'res.node.js'
base = read_file(filename)
return base.replace('"#res.core.js#"', core) | Generate res.js
Args:
meta: tuple(url_prefix, auth_header, resources) or metadata of API
Returns:
        res.js source code | Below is the instruction that describes the task:
### Input:
Generate res.js
Args:
meta: tuple(url_prefix, auth_header, resources) or metadata of API
Returns:
res.js source code
### Response:
def generate_code(meta, prefix=None, node=False, min=False):
"""
Generate res.js
Args:
meta: tuple(url_prefix, auth_header, resources) or metadata of API
Returns:
res.js source code
"""
if isinstance(meta, dict):
url_prefix, auth_header, resources = parse_meta(meta)
else:
url_prefix, auth_header, resources = meta
if prefix is not None:
url_prefix = prefix
core = render_core(url_prefix, auth_header, resources)
if min:
filename = 'res.web.min.js'
else:
filename = 'res.web.js'
if node:
filename = 'res.node.js'
base = read_file(filename)
return base.replace('"#res.core.js#"', core) |
def run(self, **kwargs):
"""
Runs an instance of a scheduled task
"""
log = self.get_logger(**kwargs)
failures = ScheduleFailure.objects
log.info("Attempting to requeue <%s> failed schedules" % failures.count())
for failure in failures.iterator():
schedule = Schedule.objects.values(
"id", "auth_token", "endpoint", "payload"
)
schedule = schedule.get(id=failure.schedule_id)
schedule["schedule_id"] = str(schedule.pop("id"))
# Cleanup the failure before requeueing it.
failure.delete()
        DeliverTask.apply_async(kwargs=schedule) | Runs an instance of a scheduled task | Below is the instruction that describes the task:
### Input:
Runs an instance of a scheduled task
### Response:
def run(self, **kwargs):
"""
Runs an instance of a scheduled task
"""
log = self.get_logger(**kwargs)
failures = ScheduleFailure.objects
log.info("Attempting to requeue <%s> failed schedules" % failures.count())
for failure in failures.iterator():
schedule = Schedule.objects.values(
"id", "auth_token", "endpoint", "payload"
)
schedule = schedule.get(id=failure.schedule_id)
schedule["schedule_id"] = str(schedule.pop("id"))
# Cleanup the failure before requeueing it.
failure.delete()
DeliverTask.apply_async(kwargs=schedule) |
def dl_pb_file(inputs):
"""
Download a file from physiobank.
The input args are to be unpacked for the use of multiprocessing
map, because python2 doesn't have starmap...
"""
basefile, subdir, db, dl_dir, keep_subdirs, overwrite = inputs
# Full url of file
url = posixpath.join(config.db_index_url, db, subdir, basefile)
# Supposed size of the file
remote_file_size = _remote_file_size(url)
# Figure out where the file should be locally
if keep_subdirs:
dldir = os.path.join(dl_dir, subdir)
else:
dldir = dl_dir
local_file = os.path.join(dldir, basefile)
# The file exists locally.
if os.path.isfile(local_file):
# Redownload regardless
if overwrite:
dl_full_file(url, local_file)
# Process accordingly.
else:
local_file_size = os.path.getsize(local_file)
# Local file is smaller than it should be. Append it.
if local_file_size < remote_file_size:
print('Detected partially downloaded file: %s Appending file...' % local_file)
headers = {"Range": "bytes="+str(local_file_size)+"-", 'Accept-Encoding': '*'}
r = requests.get(url, headers=headers, stream=True)
print('headers: ', headers)
print('r content length: ', len(r.content))
with open(local_file, 'ba') as writefile:
writefile.write(r.content)
print('Done appending.')
# Local file is larger than it should be. Redownload.
elif local_file_size > remote_file_size:
dl_full_file(url, local_file)
# If they're the same size, do nothing.
# The file doesn't exist. Download it.
else:
dl_full_file(url, local_file)
return | Download a file from physiobank.
The input args are to be unpacked for the use of multiprocessing
    map, because python2 doesn't have starmap... | Below is the instruction that describes the task:
### Input:
Download a file from physiobank.
The input args are to be unpacked for the use of multiprocessing
map, because python2 doesn't have starmap...
### Response:
def dl_pb_file(inputs):
"""
Download a file from physiobank.
The input args are to be unpacked for the use of multiprocessing
map, because python2 doesn't have starmap...
"""
basefile, subdir, db, dl_dir, keep_subdirs, overwrite = inputs
# Full url of file
url = posixpath.join(config.db_index_url, db, subdir, basefile)
# Supposed size of the file
remote_file_size = _remote_file_size(url)
# Figure out where the file should be locally
if keep_subdirs:
dldir = os.path.join(dl_dir, subdir)
else:
dldir = dl_dir
local_file = os.path.join(dldir, basefile)
# The file exists locally.
if os.path.isfile(local_file):
# Redownload regardless
if overwrite:
dl_full_file(url, local_file)
# Process accordingly.
else:
local_file_size = os.path.getsize(local_file)
# Local file is smaller than it should be. Append it.
if local_file_size < remote_file_size:
print('Detected partially downloaded file: %s Appending file...' % local_file)
headers = {"Range": "bytes="+str(local_file_size)+"-", 'Accept-Encoding': '*'}
r = requests.get(url, headers=headers, stream=True)
print('headers: ', headers)
print('r content length: ', len(r.content))
with open(local_file, 'ba') as writefile:
writefile.write(r.content)
print('Done appending.')
# Local file is larger than it should be. Redownload.
elif local_file_size > remote_file_size:
dl_full_file(url, local_file)
# If they're the same size, do nothing.
# The file doesn't exist. Download it.
else:
dl_full_file(url, local_file)
return |
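A sketch of how the packed-argument convention above is typically driven with a worker pool; the record names, database and download directory are examples only, and network access is assumed:
from multiprocessing import Pool
inputs = [
    ("100.dat", "", "mitdb", "/tmp/mitdb", True, False),
    ("100.hea", "", "mitdb", "/tmp/mitdb", True, False),
]
with Pool(processes=2) as pool:
    pool.map(dl_pb_file, inputs)   # each tuple is unpacked inside dl_pb_file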
def create(self, **kwargs):
"""Create a new Application.
Args:
**kwargs: Arbitrary keyword arguments, including:
name (str): A name for the new Application.
Returns:
A round.Application object if successful.
"""
resource = self.resource.create(kwargs)
if 'admin_token' in kwargs:
resource.context.authorize('Gem-Application',
api_token=resource.api_token,
admin_token=kwargs['admin_token'])
app = self.wrap(resource)
return self.add(app) | Create a new Application.
Args:
**kwargs: Arbitrary keyword arguments, including:
name (str): A name for the new Application.
Returns:
            A round.Application object if successful. | Below is the instruction that describes the task:
### Input:
Create a new Application.
Args:
**kwargs: Arbitrary keyword arguments, including:
name (str): A name for the new Application.
Returns:
A round.Application object if successful.
### Response:
def create(self, **kwargs):
"""Create a new Application.
Args:
**kwargs: Arbitrary keyword arguments, including:
name (str): A name for the new Application.
Returns:
A round.Application object if successful.
"""
resource = self.resource.create(kwargs)
if 'admin_token' in kwargs:
resource.context.authorize('Gem-Application',
api_token=resource.api_token,
admin_token=kwargs['admin_token'])
app = self.wrap(resource)
return self.add(app) |
def save_storage(self, instance, schema):
"""Save basic:json values to a Storage collection."""
for field_schema, fields in iterate_fields(instance, schema):
name = field_schema['name']
value = fields[name]
if field_schema.get('type', '').startswith('basic:json:'):
if value and not self.pk:
raise ValidationError(
'Data object must be `created` before creating `basic:json:` fields')
if isinstance(value, int):
# already in Storage
continue
if isinstance(value, str):
file_path = self.location.get_path(filename=value) # pylint: disable=no-member
if os.path.isfile(file_path):
try:
with open(file_path) as file_handler:
value = json.load(file_handler)
except json.JSONDecodeError:
with open(file_path) as file_handler:
content = file_handler.read()
content = content.rstrip()
raise ValidationError(
"Value of '{}' must be a valid JSON, current: {}".format(name, content)
)
storage = self.storages.create( # pylint: disable=no-member
name='Storage for data id {}'.format(self.pk),
contributor=self.contributor,
json=value,
)
# `value` is copied by value, so `fields[name]` must be changed
                fields[name] = storage.pk | Save basic:json values to a Storage collection. | Below is the instruction that describes the task:
### Input:
Save basic:json values to a Storage collection.
### Response:
def save_storage(self, instance, schema):
"""Save basic:json values to a Storage collection."""
for field_schema, fields in iterate_fields(instance, schema):
name = field_schema['name']
value = fields[name]
if field_schema.get('type', '').startswith('basic:json:'):
if value and not self.pk:
raise ValidationError(
'Data object must be `created` before creating `basic:json:` fields')
if isinstance(value, int):
# already in Storage
continue
if isinstance(value, str):
file_path = self.location.get_path(filename=value) # pylint: disable=no-member
if os.path.isfile(file_path):
try:
with open(file_path) as file_handler:
value = json.load(file_handler)
except json.JSONDecodeError:
with open(file_path) as file_handler:
content = file_handler.read()
content = content.rstrip()
raise ValidationError(
"Value of '{}' must be a valid JSON, current: {}".format(name, content)
)
storage = self.storages.create( # pylint: disable=no-member
name='Storage for data id {}'.format(self.pk),
contributor=self.contributor,
json=value,
)
# `value` is copied by value, so `fields[name]` must be changed
fields[name] = storage.pk |
def parse_scalar(scalar_data, version):
"""
Parse a Project Haystack scalar in ZINC format.
"""
try:
return hs_scalar[version].parseString(scalar_data, parseAll=True)[0]
except pp.ParseException as pe:
# Raise a new exception with the appropriate line number.
raise ZincParseException(
'Failed to parse scalar: %s' % reformat_exception(pe),
scalar_data, 1, pe.col)
except:
LOG.debug('Failing scalar data: %r (version %r)',
                  scalar_data, version) | Parse a Project Haystack scalar in ZINC format. | Below is the instruction that describes the task:
### Input:
Parse a Project Haystack scalar in ZINC format.
### Response:
def parse_scalar(scalar_data, version):
"""
Parse a Project Haystack scalar in ZINC format.
"""
try:
return hs_scalar[version].parseString(scalar_data, parseAll=True)[0]
except pp.ParseException as pe:
# Raise a new exception with the appropriate line number.
raise ZincParseException(
'Failed to parse scalar: %s' % reformat_exception(pe),
scalar_data, 1, pe.col)
except:
LOG.debug('Failing scalar data: %r (version %r)',
scalar_data, version) |
def add_option(self, K=None, price=None, St=None, kind="call", pos="long"):
"""Add an option to the object's `options` container."""
kinds = {
"call": Call,
"Call": Call,
"c": Call,
"C": Call,
"put": Put,
"Put": Put,
"p": Put,
"P": Put,
}
St = self.St if St is None else St
option = kinds[kind](St=St, K=K, price=price, pos=pos)
    self.options.append(option) | Add an option to the object's `options` container. | Below is the instruction that describes the task:
### Input:
Add an option to the object's `options` container.
### Response:
def add_option(self, K=None, price=None, St=None, kind="call", pos="long"):
"""Add an option to the object's `options` container."""
kinds = {
"call": Call,
"Call": Call,
"c": Call,
"C": Call,
"put": Put,
"Put": Put,
"p": Put,
"P": Put,
}
St = self.St if St is None else St
option = kinds[kind](St=St, K=K, price=price, pos=pos)
self.options.append(option) |
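A usage sketch for add_option, assuming the surrounding options-strategy class (called OpStrat here purely for illustration) exposes St and an options list as the method implies; strikes and prices are invented:
strategy = OpStrat(St=100.0)                                    # hypothetical container class
strategy.add_option(K=105.0, price=2.5, kind="call", pos="long")
strategy.add_option(K=95.0, price=1.8, kind="p", pos="short")   # 'p' maps to Put
print(len(strategy.options))                                    # -> 2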
def create_rackservers(self):
"""Get an instance of rackservers services facade."""
return RackServers(
self.networkapi_url,
self.user,
self.password,
        self.user_ldap) | Get an instance of rackservers services facade. | Below is the instruction that describes the task:
### Input:
Get an instance of rackservers services facade.
### Response:
def create_rackservers(self):
"""Get an instance of rackservers services facade."""
return RackServers(
self.networkapi_url,
self.user,
self.password,
self.user_ldap) |
def generate_tags_multiple_files(input_files, tag, ignore_tags, ns=None):
"""
Calls xmltag generator for multiple files.
"""
return itertools.chain.from_iterable([generate_xmltags(
        fn, tag, ignore_tags, ns) for fn in input_files]) | Calls xmltag generator for multiple files. | Below is the instruction that describes the task:
### Input:
Calls xmltag generator for multiple files.
### Response:
def generate_tags_multiple_files(input_files, tag, ignore_tags, ns=None):
"""
Calls xmltag generator for multiple files.
"""
return itertools.chain.from_iterable([generate_xmltags(
fn, tag, ignore_tags, ns) for fn in input_files]) |
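The chaining pattern above can be shown standalone; the file names and the per-file generator below are toy stand-ins for generate_xmltags:
import itertools
def fake_tags(fn):
    yield from ("{}:tag{}".format(fn, i) for i in range(2))
files = ["a.xml", "b.xml"]
merged = itertools.chain.from_iterable(fake_tags(fn) for fn in files)
print(list(merged))   # tags from both files, in file order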
def _copy_docstring(original_fn, new_fn):
"""Wraps new_fn with the doc of original_fn."""
original_spec = tf_inspect.getfullargspec(original_fn)
new_spec = tf_inspect.getfullargspec(new_fn)
if original_spec != new_spec:
raise ValueError(
'Arg specs do not match: original={}, new={}, fn={}'.format(
original_spec, new_spec, original_fn))
@decorator.decorator
def wrap(wrapped_fn, *args, **kwargs):
del wrapped_fn
return new_fn(*args, **kwargs)
  return wrap(original_fn) | Wraps new_fn with the doc of original_fn. | Below is the instruction that describes the task:
### Input:
Wraps new_fn with the doc of original_fn.
### Response:
def _copy_docstring(original_fn, new_fn):
"""Wraps new_fn with the doc of original_fn."""
original_spec = tf_inspect.getfullargspec(original_fn)
new_spec = tf_inspect.getfullargspec(new_fn)
if original_spec != new_spec:
raise ValueError(
'Arg specs do not match: original={}, new={}, fn={}'.format(
original_spec, new_spec, original_fn))
@decorator.decorator
def wrap(wrapped_fn, *args, **kwargs):
del wrapped_fn
return new_fn(*args, **kwargs)
return wrap(original_fn) |
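A usage sketch for _copy_docstring, assuming the TensorFlow-style imports it relies on (tf_inspect, decorator) are available; the two functions are toy stand-ins with identical signatures:
def original_fn(x, y):
  """Adds two numbers."""
  return x + y
def new_fn(x, y):
  return x + y
wrapped = _copy_docstring(original_fn, new_fn)
print(wrapped.__doc__)   # expected: 'Adds two numbers.'
print(wrapped(2, 3))     # expected: 5, dispatched to new_fn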
def _get_names(self):
"""Get the names of the objects to include in the table.
:returns: The names of the objects to include.
:rtype: generator(str)
"""
for line in self.content:
line = line.strip()
if line and re.search("^[a-zA-Z0-9]", line):
yield line | Get the names of the objects to include in the table.
:returns: The names of the objects to include.
    :rtype: generator(str) | Below is the instruction that describes the task:
### Input:
Get the names of the objects to include in the table.
:returns: The names of the objects to include.
:rtype: generator(str)
### Response:
def _get_names(self):
"""Get the names of the objects to include in the table.
:returns: The names of the objects to include.
:rtype: generator(str)
"""
for line in self.content:
line = line.strip()
if line and re.search("^[a-zA-Z0-9]", line):
yield line |
def _load_cache(self):
"""Load existing cache and merge for updating if required."""
if not self._disable_cache and os.path.exists(self._cache_path):
_LOGGER.debug("Cache found at: %s", self._cache_path)
loaded_cache = UTILS.load_cache(self._cache_path)
UTILS.update(self._cache, loaded_cache)
            self._save_cache() | Load existing cache and merge for updating if required. | Below is the instruction that describes the task:
### Input:
Load existing cache and merge for updating if required.
### Response:
def _load_cache(self):
"""Load existing cache and merge for updating if required."""
if not self._disable_cache and os.path.exists(self._cache_path):
_LOGGER.debug("Cache found at: %s", self._cache_path)
loaded_cache = UTILS.load_cache(self._cache_path)
UTILS.update(self._cache, loaded_cache)
self._save_cache() |
def write(self):
"""If data exists for the entity, writes it to disk as a .JSON file with
the url-encoded URI as the filename and returns True. Else, returns
False."""
if (self.data):
dataPath = self.client.local_dir / (urllib.parse.quote_plus(self.uri)+'.json')
with dataPath.open(mode='w') as dump_file:
json.dump(self.data, dump_file)
dump_file.close()
logger.info('Wrote ' + self.uri + ' to file')
return True
else:
logger.warning('No data to write for ' + self.uri)
return False | If data exists for the entity, writes it to disk as a .JSON file with
the url-encoded URI as the filename and returns True. Else, returns
    False. | Below is the instruction that describes the task:
### Input:
If data exists for the entity, writes it to disk as a .JSON file with
the url-encoded URI as the filename and returns True. Else, returns
False.
### Response:
def write(self):
"""If data exists for the entity, writes it to disk as a .JSON file with
the url-encoded URI as the filename and returns True. Else, returns
False."""
if (self.data):
dataPath = self.client.local_dir / (urllib.parse.quote_plus(self.uri)+'.json')
with dataPath.open(mode='w') as dump_file:
json.dump(self.data, dump_file)
dump_file.close()
logger.info('Wrote ' + self.uri + ' to file')
return True
else:
logger.warning('No data to write for ' + self.uri)
return False |
def schema_delete_field(cls, key):
"""Deletes a field."""
root = '/'.join([API_ROOT, 'schemas', cls.__name__])
payload = {
'className': cls.__name__,
'fields': {
key: {
'__op': 'Delete'
}
}
}
cls.PUT(root, **payload) | Deletes a field. | Below is the instruction that describes the task:
### Input:
Deletes a field.
### Response:
def schema_delete_field(cls, key):
"""Deletes a field."""
root = '/'.join([API_ROOT, 'schemas', cls.__name__])
payload = {
'className': cls.__name__,
'fields': {
key: {
'__op': 'Delete'
}
}
}
cls.PUT(root, **payload) |
def wr_dat_files(self, expanded=False, write_dir=''):
"""
Write each of the specified dat files
"""
# Get the set of dat files to be written, and
# the channels to be written to each file.
file_names, dat_channels = describe_list_indices(self.file_name)
# Get the fmt and byte offset corresponding to each dat file
DAT_FMTS = {}
dat_offsets = {}
for fn in file_names:
DAT_FMTS[fn] = self.fmt[dat_channels[fn][0]]
# byte_offset may not be present
if self.byte_offset is None:
dat_offsets[fn] = 0
else:
dat_offsets[fn] = self.byte_offset[dat_channels[fn][0]]
# Write the dat files
if expanded:
for fn in file_names:
wr_dat_file(fn, DAT_FMTS[fn], None , dat_offsets[fn], True,
[self.e_d_signal[ch] for ch in dat_channels[fn]],
self.samps_per_frame, write_dir=write_dir)
else:
# Create a copy to prevent overwrite
dsig = self.d_signal.copy()
for fn in file_names:
wr_dat_file(fn, DAT_FMTS[fn],
dsig[:, dat_channels[fn][0]:dat_channels[fn][-1]+1],
dat_offsets[fn], write_dir=write_dir) | Write each of the specified dat files | Below is the instruction that describes the task:
### Input:
Write each of the specified dat files
### Response:
def wr_dat_files(self, expanded=False, write_dir=''):
"""
Write each of the specified dat files
"""
# Get the set of dat files to be written, and
# the channels to be written to each file.
file_names, dat_channels = describe_list_indices(self.file_name)
# Get the fmt and byte offset corresponding to each dat file
DAT_FMTS = {}
dat_offsets = {}
for fn in file_names:
DAT_FMTS[fn] = self.fmt[dat_channels[fn][0]]
# byte_offset may not be present
if self.byte_offset is None:
dat_offsets[fn] = 0
else:
dat_offsets[fn] = self.byte_offset[dat_channels[fn][0]]
# Write the dat files
if expanded:
for fn in file_names:
wr_dat_file(fn, DAT_FMTS[fn], None , dat_offsets[fn], True,
[self.e_d_signal[ch] for ch in dat_channels[fn]],
self.samps_per_frame, write_dir=write_dir)
else:
# Create a copy to prevent overwrite
dsig = self.d_signal.copy()
for fn in file_names:
wr_dat_file(fn, DAT_FMTS[fn],
dsig[:, dat_channels[fn][0]:dat_channels[fn][-1]+1],
dat_offsets[fn], write_dir=write_dir) |
def master_main(painter, router, select, delay):
"""
Loop until CTRL+C is pressed, waiting for the next result delivered by the
Select. Use parse_output() to turn that result ('ps' command output) into
rich data, and finally repaint the screen if the repaint delay has passed.
"""
next_paint = 0
while True:
msg = select.get()
parse_output(msg.receiver.host, msg.unpickle())
if next_paint < time.time():
next_paint = time.time() + delay
painter.paint() | Loop until CTRL+C is pressed, waiting for the next result delivered by the
Select. Use parse_output() to turn that result ('ps' command output) into
rich data, and finally repaint the screen if the repaint delay has passed. | Below is the instruction that describes the task:
### Input:
Loop until CTRL+C is pressed, waiting for the next result delivered by the
Select. Use parse_output() to turn that result ('ps' command output) into
rich data, and finally repaint the screen if the repaint delay has passed.
### Response:
def master_main(painter, router, select, delay):
"""
Loop until CTRL+C is pressed, waiting for the next result delivered by the
Select. Use parse_output() to turn that result ('ps' command output) into
rich data, and finally repaint the screen if the repaint delay has passed.
"""
next_paint = 0
while True:
msg = select.get()
parse_output(msg.receiver.host, msg.unpickle())
if next_paint < time.time():
next_paint = time.time() + delay
painter.paint() |
def updateflags(self, flags):
"""
Thin wrapper around build_update(flags=X). This only handles simple
status changes, anything like needinfo requestee needs to call
build_update + update_bugs directly
:param flags: Dictionary of the form {"flagname": "status"}, example
{"needinfo": "?", "devel_ack": "+"}
"""
flaglist = []
for key, value in flags.items():
flaglist.append({"name": key, "status": value})
return self.bugzilla.update_bugs([self.bug_id],
self.bugzilla.build_update(flags=flaglist)) | Thin wrapper around build_update(flags=X). This only handles simple
status changes, anything like needinfo requestee needs to call
build_update + update_bugs directly
:param flags: Dictionary of the form {"flagname": "status"}, example
{"needinfo": "?", "devel_ack": "+"} | Below is the the instruction that describes the task:
### Input:
Thin wrapper around build_update(flags=X). This only handles simple
status changes, anything like needinfo requestee needs to call
build_update + update_bugs directly
:param flags: Dictionary of the form {"flagname": "status"}, example
{"needinfo": "?", "devel_ack": "+"}
### Response:
def updateflags(self, flags):
"""
Thin wrapper around build_update(flags=X). This only handles simple
status changes, anything like needinfo requestee needs to call
build_update + update_bugs directly
:param flags: Dictionary of the form {"flagname": "status"}, example
{"needinfo": "?", "devel_ack": "+"}
"""
flaglist = []
for key, value in flags.items():
flaglist.append({"name": key, "status": value})
return self.bugzilla.update_bugs([self.bug_id],
self.bugzilla.build_update(flags=flaglist)) |
def make_connection(self, node):
"""
Create a new connection
"""
if self.count_all_num_connections(node) >= self.max_connections:
if self.max_connections_per_node:
raise RedisClusterException("Too many connections ({0}) for node: {1}"
.format(self.count_all_num_connections(node),
node['name']))
raise RedisClusterException("Too many connections")
self._created_connections_per_node.setdefault(node['name'], 0)
self._created_connections_per_node[node['name']] += 1
connection = self.connection_class(host=node["host"],
port=node["port"],
**self.connection_kwargs)
# Must store node in the connection to make it easier to track
connection.node = node
return connection | Create a new connection | Below is the instruction that describes the task:
### Input:
Create a new connection
### Response:
def make_connection(self, node):
"""
Create a new connection
"""
if self.count_all_num_connections(node) >= self.max_connections:
if self.max_connections_per_node:
raise RedisClusterException("Too many connections ({0}) for node: {1}"
.format(self.count_all_num_connections(node),
node['name']))
raise RedisClusterException("Too many connections")
self._created_connections_per_node.setdefault(node['name'], 0)
self._created_connections_per_node[node['name']] += 1
connection = self.connection_class(host=node["host"],
port=node["port"],
**self.connection_kwargs)
# Must store node in the connection to make it easier to track
connection.node = node
return connection |
def _FormatPackedIPv6Address(self, packed_ip_address):
"""Formats a packed IPv6 address as a human readable string.
Args:
packed_ip_address (list[int]): packed IPv6 address.
Returns:
str: human readable IPv6 address.
"""
# Note that socket.inet_ntop() is not supported on Windows.
octet_pairs = zip(packed_ip_address[0::2], packed_ip_address[1::2])
octet_pairs = [octet1 << 8 | octet2 for octet1, octet2 in octet_pairs]
# TODO: omit ":0000" from the string.
return ':'.join([
'{0:04x}'.format(octet_pair) for octet_pair in octet_pairs]) | Formats a packed IPv6 address as a human readable string.
Args:
packed_ip_address (list[int]): packed IPv6 address.
Returns:
str: human readable IPv6 address. | Below is the instruction that describes the task:
### Input:
Formats a packed IPv6 address as a human readable string.
Args:
packed_ip_address (list[int]): packed IPv6 address.
Returns:
str: human readable IPv6 address.
### Response:
def _FormatPackedIPv6Address(self, packed_ip_address):
"""Formats a packed IPv6 address as a human readable string.
Args:
packed_ip_address (list[int]): packed IPv6 address.
Returns:
str: human readable IPv6 address.
"""
# Note that socket.inet_ntop() is not supported on Windows.
octet_pairs = zip(packed_ip_address[0::2], packed_ip_address[1::2])
octet_pairs = [octet1 << 8 | octet2 for octet1, octet2 in octet_pairs]
# TODO: omit ":0000" from the string.
return ':'.join([
'{0:04x}'.format(octet_pair) for octet_pair in octet_pairs]) |
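A self-contained illustration of the byte-pairing logic above, using a hypothetical 16-byte packed address:

packed = [0x20, 0x01, 0x0d, 0xb8] + [0x00] * 10 + [0x00, 0x01]   # 16 bytes of a made-up address
octet_pairs = zip(packed[0::2], packed[1::2])
groups = [hi << 8 | lo for hi, lo in octet_pairs]
print(':'.join('{0:04x}'.format(g) for g in groups))
# -> 2001:0db8:0000:0000:0000:0000:0000:0001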
def command_template(self):
"""Build and return a string that can be used as a template invoking
this chain from the command line.
The actual command can be obtained by using
`self.command_template().format(**self.args)`
"""
com_out = self.appname
arg_string = ""
flag_string = ""
# Loop over the key, value pairs in self.args
for key, val in self.args.items():
# Check if the value is set in self._options
# If so, get the value from there
if val is None:
opt_val = self._options[key][0]
else:
opt_val = val
opt_type = self._options[key][2]
if key == 'args':
# 'args' is special, pull it out and move it to the back
arg_string += ' {%s}' % key
elif opt_type is bool:
if opt_val:
flag_string += ' --%s' % (key)
elif opt_type is list:
if is_null(opt_val):
continue
elif isinstance(opt_val, str):
com_out += ' --%s %s' % (key, opt_val)
elif isinstance(opt_val, list):
for arg_val in opt_val:
com_out += ' --%s %s' % (key, arg_val)
else:
com_out += ' --%s {%s}' % (key, key)
com_out += flag_string
com_out += arg_string
return com_out | Build and return a string that can be used as a template invoking
this chain from the command line.
The actual command can be obtained by using
`self.command_template().format(**self.args)` | Below is the instruction that describes the task:
### Input:
Build and return a string that can be used as a template invoking
this chain from the command line.
The actual command can be obtained by using
`self.command_template().format(**self.args)`
### Response:
def command_template(self):
"""Build and return a string that can be used as a template invoking
this chain from the command line.
The actual command can be obtained by using
`self.command_template().format(**self.args)`
"""
com_out = self.appname
arg_string = ""
flag_string = ""
# Loop over the key, value pairs in self.args
for key, val in self.args.items():
# Check if the value is set in self._options
# If so, get the value from there
if val is None:
opt_val = self._options[key][0]
else:
opt_val = val
opt_type = self._options[key][2]
if key == 'args':
# 'args' is special, pull it out and move it to the back
arg_string += ' {%s}' % key
elif opt_type is bool:
if opt_val:
flag_string += ' --%s' % (key)
elif opt_type is list:
if is_null(opt_val):
continue
elif isinstance(opt_val, str):
com_out += ' --%s %s' % (key, opt_val)
elif isinstance(opt_val, list):
for arg_val in opt_val:
com_out += ' --%s %s' % (key, arg_val)
else:
com_out += ' --%s {%s}' % (key, key)
com_out += flag_string
com_out += arg_string
return com_out |
def load(settings=None, namespace=None, prefix=None):
"""
Call this guy to init pyaas stuffs
:param settings: Alternative name of ini file to load
:param namespace: Namespace is used to derive paths, pass '' for an empty namespace
:param prefix: The root path of the app
:return: None
"""
parent = pyaas.util.getParent()
script_name = os.path.basename(parent)
script_name = script_name.rsplit('.', 1)[0]
if prefix is None:
# get the filename of the caller
# get the directory name of the file
prefix = os.path.dirname(parent)
if prefix.endswith(os.path.sep + 'bin'):
prefix = os.path.join(prefix, '..')
prefix = os.path.abspath(prefix)
prefix = os.path.abspath(prefix)
if pyaas.prefix != prefix:
pyaas.prefix = prefix
logging.debug('Setting prefix to "%s"', pyaas.prefix)
if namespace is None:
namespace = script_name
if namespace != pyaas.namespace:
pyaas.namespace = namespace
logging.debug('Setting namespace to "%s"', pyaas.namespace)
# if settings is not passed in use the supplied or derived namespace
settings = settings or namespace
pyaas.args = pyaas.argparser.parse_args()
pyaas.config = configparser.SafeConfigParser(dict_type=collections.OrderedDict)
pyaas.config.optionxform = str
ini_files = [
pyaas.paths('etc', settings + '.ini'),
pyaas.paths('etc', settings + '.ini.local')
]
if pyaas.args.ini:
ini_files.append(pyaas.args.ini)
try:
ok = pyaas.config.read(ini_files)
except configparser.ParsingError as e:
raise pyaas.error('Unable to parse file: %s', e)
if not ok:
raise pyaas.error('Unable to read config file(s): %s', ini_files)
# setup file log
file_name = '%s_%s.log' % (script_name, time.strftime('%Y%m%d_%H%M%S'))
# hack back in single log file option without breaking existing code
if pyaas.config.has_section('logging'):
if pyaas.config.has_option('logging', 'append'):
append = pyaas.config.getboolean('logging', 'append')
if append:
file_name = script_name + '.log'
full_path = pyaas.paths('var', file_name)
logfile = logging.FileHandler(full_path)
logfile.setLevel(logging.INFO)
logfile.setFormatter(
logging.Formatter(
fmt = '%(asctime)s %(levelname)-8s %(message)s',
datefmt = '%Y-%m-%d %H:%M:%S',
)
)
# add the handlers to the logger
root = logging.getLogger()
root.addHandler(logfile)
if pyaas.args.debug:
root.setLevel(logging.DEBUG)
logfile.setLevel(logging.DEBUG)
# call this here if there is no daemon option
if not hasattr(pyaas.args, 'daemon'):
pyaas.module.load()
return | Call this guy to init pyaas stuffs
:param settings: Alternative name of ini file to load
:param namespace: Namespace is used to derive paths, pass '' for an empty namespace
:param prefix: The root path of the app
:return: None | Below is the instruction that describes the task:
### Input:
Call this guy to init pyaas stuffs
:param settings: Alternative name of ini file to load
:param namespace: Namespace is used to derive paths, pass '' for an empty namespace
:param prefix: The root path of the app
:return: None
### Response:
def load(settings=None, namespace=None, prefix=None):
"""
Call this guy to init pyaas stuffs
:param settings: Alternative name of ini file to load
:param namespace: Namespace is used to derive paths, pass '' for an empty namespace
:param prefix: The root path of the app
:return: None
"""
parent = pyaas.util.getParent()
script_name = os.path.basename(parent)
script_name = script_name.rsplit('.', 1)[0]
if prefix is None:
# get the filename of the caller
# get the directory name of the file
prefix = os.path.dirname(parent)
if prefix.endswith(os.path.sep + 'bin'):
prefix = os.path.join(prefix, '..')
prefix = os.path.abspath(prefix)
prefix = os.path.abspath(prefix)
if pyaas.prefix != prefix:
pyaas.prefix = prefix
logging.debug('Setting prefix to "%s"', pyaas.prefix)
if namespace is None:
namespace = script_name
if namespace != pyaas.namespace:
pyaas.namespace = namespace
logging.debug('Setting namespace to "%s"', pyaas.namespace)
# if settings is not passed in use the supplied or derived namespace
settings = settings or namespace
pyaas.args = pyaas.argparser.parse_args()
pyaas.config = configparser.SafeConfigParser(dict_type=collections.OrderedDict)
pyaas.config.optionxform = str
ini_files = [
pyaas.paths('etc', settings + '.ini'),
pyaas.paths('etc', settings + '.ini.local')
]
if pyaas.args.ini:
ini_files.append(pyaas.args.ini)
try:
ok = pyaas.config.read(ini_files)
except configparser.ParsingError as e:
raise pyaas.error('Unable to parse file: %s', e)
if not ok:
raise pyaas.error('Unable to read config file(s): %s', ini_files)
# setup file log
file_name = '%s_%s.log' % (script_name, time.strftime('%Y%m%d_%H%M%S'))
# hack back in single log file option without breaking existing code
if pyaas.config.has_section('logging'):
if pyaas.config.has_option('logging', 'append'):
append = pyaas.config.getboolean('logging', 'append')
if append:
file_name = script_name + '.log'
full_path = pyaas.paths('var', file_name)
logfile = logging.FileHandler(full_path)
logfile.setLevel(logging.INFO)
logfile.setFormatter(
logging.Formatter(
fmt = '%(asctime)s %(levelname)-8s %(message)s',
datefmt = '%Y-%m-%d %H:%M:%S',
)
)
# add the handlers to the logger
root = logging.getLogger()
root.addHandler(logfile)
if pyaas.args.debug:
root.setLevel(logging.DEBUG)
logfile.setLevel(logging.DEBUG)
# call this here if there is no daemon option
if not hasattr(pyaas.args, 'daemon'):
pyaas.module.load()
return |
def dict_to_querystring(dictionary):
"""Converts a dict to a querystring suitable to be appended to a URL."""
s = u""
for d in dictionary.keys():
s = unicode.format(u"{0}{1}={2}&", s, d, dictionary[d])
return s[:-1] | Converts a dict to a querystring suitable to be appended to a URL. | Below is the instruction that describes the task:
### Input:
Converts a dict to a querystring suitable to be appended to a URL.
### Response:
def dict_to_querystring(dictionary):
"""Converts a dict to a querystring suitable to be appended to a URL."""
s = u""
for d in dictionary.keys():
s = unicode.format(u"{0}{1}={2}&", s, d, dictionary[d])
return s[:-1] |
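The function above relies on Python 2's `unicode`; a Python 3 sketch of the same behaviour, with illustrative values (urllib.parse.urlencode is the usual standard-library choice):

from urllib.parse import urlencode

params = {"q": "dex", "limit": 10}
print(urlencode(params))                                             # -> q=dex&limit=10
print("&".join("{0}={1}".format(k, v) for k, v in params.items()))   # hand-rolled equivalent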
def _merge_simple_selectors(a, b):
"""Merge two simple selectors, for the purposes of the LCS algorithm below.
In practice this returns the more specific selector if one is a subset of
the other, else it returns None.
"""
# TODO what about combinators
if a.is_superset_of(b):
return b
elif b.is_superset_of(a):
return a
else:
return None | Merge two simple selectors, for the purposes of the LCS algorithm below.
In practice this returns the more specific selector if one is a subset of
the other, else it returns None. | Below is the instruction that describes the task:
### Input:
Merge two simple selectors, for the purposes of the LCS algorithm below.
In practice this returns the more specific selector if one is a subset of
the other, else it returns None.
### Response:
def _merge_simple_selectors(a, b):
"""Merge two simple selectors, for the purposes of the LCS algorithm below.
In practice this returns the more specific selector if one is a subset of
the other, else it returns None.
"""
# TODO what about combinators
if a.is_superset_of(b):
return b
elif b.is_superset_of(a):
return a
else:
return None |
def html_clean(html_code):
"""获取网页源代码并进行预处理
Keyword arguments:
html_code -- 网页源代码,字符串类型
Return:
清洗后的网页源代码(只包含文本和换行符\n)
"""
temp = re.sub('<script([\s\S]*?)</script>', '', html_code)
temp = re.sub('<style([\s\S]*?)</style>', '', temp)
html_cleaned = re.sub('(?is)<.*?>', '', temp)
for item in html_character:
html_cleaned = html_cleaned.replace(item, html_character[item])
log('debug', '网页源代码预处理完成:\n【{}】'.format(html_cleaned))
return html_cleaned | Get the web page source code and preprocess it
Keyword arguments:
html_code -- web page source code, as a string
Return:
the cleaned web page source code (containing only text and newline characters \n) | Below is the instruction that describes the task:
### Input:
Get the web page source code and preprocess it
Keyword arguments:
html_code -- web page source code, as a string
Return:
the cleaned web page source code (containing only text and newline characters \n)
### Response:
def html_clean(html_code):
"""获取网页源代码并进行预处理
Keyword arguments:
html_code -- 网页源代码,字符串类型
Return:
清洗后的网页源代码(只包含文本和换行符\n)
"""
temp = re.sub('<script([\s\S]*?)</script>', '', html_code)
temp = re.sub('<style([\s\S]*?)</style>', '', temp)
html_cleaned = re.sub('(?is)<.*?>', '', temp)
for item in html_character:
html_cleaned = html_cleaned.replace(item, html_character[item])
log('debug', '网页源代码预处理完成:\n【{}】'.format(html_cleaned))
return html_cleaned |
def mass_integral(self, x, axis_ratio):
"""Routine to integrate an elliptical light profiles - set axis ratio to 1 to compute the luminosity within a \
circle"""
r = x * axis_ratio
return 2 * np.pi * r * self.convergence_func(x) | Routine to integrate an elliptical light profile - set axis ratio to 1 to compute the luminosity within a \
circle | Below is the instruction that describes the task:
### Input:
Routine to integrate an elliptical light profile - set axis ratio to 1 to compute the luminosity within a \
circle
### Response:
def mass_integral(self, x, axis_ratio):
"""Routine to integrate an elliptical light profiles - set axis ratio to 1 to compute the luminosity within a \
circle"""
r = x * axis_ratio
return 2 * np.pi * r * self.convergence_func(x) |
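A rough, self-contained sketch of how such an integrand could be fed to scipy.integrate.quad; the power-law convergence function here is a made-up stand-in for self.convergence_func, and the radial limits are illustrative:

import numpy as np
from scipy import integrate

convergence = lambda x: x ** -1.5             # made-up stand-in for self.convergence_func
axis_ratio = 0.8
integrand = lambda x, q: 2 * np.pi * (x * q) * convergence(x)
total, _ = integrate.quad(integrand, 0.1, 5.0, args=(axis_ratio,))
print(total)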
def _get_norm_opts(self, obj):
"""
Gets the normalization options for a LabelledData object by
traversing the object to find elements and their ids.
The id is then used to select the appropriate OptionsTree,
accumulating the normalization options into a dictionary.
Returns a dictionary of normalization options for each
element in the tree.
"""
norm_opts = {}
# Get all elements' type.group.label specs and ids
type_val_fn = lambda x: (x.id, (type(x).__name__, util.group_sanitizer(x.group, escape=False),
util.label_sanitizer(x.label, escape=False))) \
if isinstance(x, Element) else None
element_specs = {(idspec[0], idspec[1]) for idspec in obj.traverse(type_val_fn)
if idspec is not None}
# Group elements specs by ID and override normalization
# options sequentially
key_fn = lambda x: -1 if x[0] is None else x[0]
id_groups = groupby(sorted(element_specs, key=key_fn), key_fn)
for gid, element_spec_group in id_groups:
gid = None if gid == -1 else gid
group_specs = [el for _, el in element_spec_group]
backend = self.renderer.backend
optstree = Store.custom_options(
backend=backend).get(gid, Store.options(backend=backend))
# Get the normalization options for the current id
# and match against customizable elements
for opts in optstree:
path = tuple(opts.path.split('.')[1:])
applies = any(path == spec[:i] for spec in group_specs
for i in range(1, 4))
if applies and 'norm' in opts.groups:
nopts = opts['norm'].options
if 'axiswise' in nopts or 'framewise' in nopts:
norm_opts.update({path: (nopts.get('axiswise', False),
nopts.get('framewise', False))})
element_specs = [spec for _, spec in element_specs]
norm_opts.update({spec: (False, False) for spec in element_specs
if not any(spec[:i] in norm_opts.keys() for i in range(1, 4))})
return norm_opts | Gets the normalization options for a LabelledData object by
traversing the object to find elements and their ids.
The id is then used to select the appropriate OptionsTree,
accumulating the normalization options into a dictionary.
Returns a dictionary of normalization options for each
element in the tree. | Below is the instruction that describes the task:
### Input:
Gets the normalization options for a LabelledData object by
traversing the object to find elements and their ids.
The id is then used to select the appropriate OptionsTree,
accumulating the normalization options into a dictionary.
Returns a dictionary of normalization options for each
element in the tree.
### Response:
def _get_norm_opts(self, obj):
"""
Gets the normalization options for a LabelledData object by
traversing the object to find elements and their ids.
The id is then used to select the appropriate OptionsTree,
accumulating the normalization options into a dictionary.
Returns a dictionary of normalization options for each
element in the tree.
"""
norm_opts = {}
# Get all elements' type.group.label specs and ids
type_val_fn = lambda x: (x.id, (type(x).__name__, util.group_sanitizer(x.group, escape=False),
util.label_sanitizer(x.label, escape=False))) \
if isinstance(x, Element) else None
element_specs = {(idspec[0], idspec[1]) for idspec in obj.traverse(type_val_fn)
if idspec is not None}
# Group elements specs by ID and override normalization
# options sequentially
key_fn = lambda x: -1 if x[0] is None else x[0]
id_groups = groupby(sorted(element_specs, key=key_fn), key_fn)
for gid, element_spec_group in id_groups:
gid = None if gid == -1 else gid
group_specs = [el for _, el in element_spec_group]
backend = self.renderer.backend
optstree = Store.custom_options(
backend=backend).get(gid, Store.options(backend=backend))
# Get the normalization options for the current id
# and match against customizable elements
for opts in optstree:
path = tuple(opts.path.split('.')[1:])
applies = any(path == spec[:i] for spec in group_specs
for i in range(1, 4))
if applies and 'norm' in opts.groups:
nopts = opts['norm'].options
if 'axiswise' in nopts or 'framewise' in nopts:
norm_opts.update({path: (nopts.get('axiswise', False),
nopts.get('framewise', False))})
element_specs = [spec for _, spec in element_specs]
norm_opts.update({spec: (False, False) for spec in element_specs
if not any(spec[:i] in norm_opts.keys() for i in range(1, 4))})
return norm_opts |
def watch_profile(self):
"""Analyzes queries from a given log file"""
profile_parser = ProfileParser()
databases = self._get_requested_databases()
connection = pymongo.MongoClient(self._db_uri,
document_class=OrderedDict,
read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED)
enabled_profile = False
if databases == []:
try:
databases = connection.database_names()
except:
message = "Error: Could not list databases on server. Please " \
+ "check the auth components of your URI.\n"
sys.stderr.write(message)
databases = []
for ignore_db in IGNORE_DBS:
if ignore_db in databases:
databases.remove(ignore_db)
if len(databases) != 1:
message = "Error: Please use namespaces (-n) to specify a single " \
+ "database for profile watching.\n"
sys.stderr.write(message)
return 1
database = databases[0]
db = connection[database]
initial_profile_level = db.profiling_level()
if initial_profile_level == pymongo.OFF:
message = "Profile level currently 0. Dex is setting profile " \
+ "level 1. To run --watch at profile level 2, " \
+ "enable profile level 2 before running Dex.\n"
sys.stderr.write(message)
db.set_profiling_level(DEFAULT_PROFILE_LEVEL)
output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
try:
for profile_entry in self._tail_profile(db, WATCH_INTERVAL_SECONDS):
self._process_query(profile_entry,
profile_parser)
if time.time() >= output_time:
self._output_aggregated_report(sys.stderr)
output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
except KeyboardInterrupt:
sys.stderr.write("Interrupt received\n")
finally:
self._output_aggregated_report(sys.stdout)
if initial_profile_level == pymongo.OFF:
message = "Dex is resetting profile level to initial value " \
+ "of 0. You may wish to drop the system.profile " \
+ "collection.\n"
sys.stderr.write(message)
db.set_profiling_level(initial_profile_level)
return 0 | Analyzes queries from a given log file | Below is the instruction that describes the task:
### Input:
Analyzes queries from a given log file
### Response:
def watch_profile(self):
"""Analyzes queries from a given log file"""
profile_parser = ProfileParser()
databases = self._get_requested_databases()
connection = pymongo.MongoClient(self._db_uri,
document_class=OrderedDict,
read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED)
enabled_profile = False
if databases == []:
try:
databases = connection.database_names()
except:
message = "Error: Could not list databases on server. Please " \
+ "check the auth components of your URI.\n"
sys.stderr.write(message)
databases = []
for ignore_db in IGNORE_DBS:
if ignore_db in databases:
databases.remove(ignore_db)
if len(databases) != 1:
message = "Error: Please use namespaces (-n) to specify a single " \
+ "database for profile watching.\n"
sys.stderr.write(message)
return 1
database = databases[0]
db = connection[database]
initial_profile_level = db.profiling_level()
if initial_profile_level == pymongo.OFF:
message = "Profile level currently 0. Dex is setting profile " \
+ "level 1. To run --watch at profile level 2, " \
+ "enable profile level 2 before running Dex.\n"
sys.stderr.write(message)
db.set_profiling_level(DEFAULT_PROFILE_LEVEL)
output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
try:
for profile_entry in self._tail_profile(db, WATCH_INTERVAL_SECONDS):
self._process_query(profile_entry,
profile_parser)
if time.time() >= output_time:
self._output_aggregated_report(sys.stderr)
output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS
except KeyboardInterrupt:
sys.stderr.write("Interrupt received\n")
finally:
self._output_aggregated_report(sys.stdout)
if initial_profile_level == pymongo.OFF:
message = "Dex is resetting profile level to initial value " \
+ "of 0. You may wish to drop the system.profile " \
+ "collection.\n"
sys.stderr.write(message)
db.set_profiling_level(initial_profile_level)
return 0 |
def present(name,
user=None,
password=None,
auth='password',
encoding='UTF8',
locale=None,
runas=None,
waldir=None,
checksums=False):
'''
Initialize the PostgreSQL data directory
name
The name of the directory to initialize
user
The database superuser name
password
The password to set for the postgres user
auth
The default authentication method for local connections
encoding
The default encoding for new databases
locale
The default locale for new databases
waldir
The transaction log (WAL) directory (default is to keep WAL
inside the data directory)
.. versionadded:: 2019.2.0
checksums
If True, the cluster will be created with data page checksums.
.. note:: Data page checksums are supported since PostgreSQL 9.3.
.. versionadded:: 2019.2.0
runas
The system user the operation should be performed on behalf of
'''
_cmt = 'Postgres data directory {0} is already present'.format(name)
ret = {
'name': name,
'changes': {},
'result': True,
'comment': _cmt}
if not __salt__['postgres.datadir_exists'](name=name):
if __opts__['test']:
ret['result'] = None
_cmt = 'Postgres data directory {0} is set to be initialized'\
.format(name)
ret['comment'] = _cmt
return ret
kwargs = dict(
user=user,
password=password,
auth=auth,
encoding=encoding,
locale=locale,
waldir=waldir,
checksums=checksums,
runas=runas)
if __salt__['postgres.datadir_init'](name, **kwargs):
_cmt = 'Postgres data directory {0} has been initialized'\
.format(name)
ret['comment'] = _cmt
ret['changes'][name] = 'Present'
else:
_cmt = 'Postgres data directory {0} initialization failed'\
.format(name)
ret['result'] = False
ret['comment'] = _cmt
return ret | Initialize the PostgreSQL data directory
name
The name of the directory to initialize
user
The database superuser name
password
The password to set for the postgres user
auth
The default authentication method for local connections
encoding
The default encoding for new databases
locale
The default locale for new databases
waldir
The transaction log (WAL) directory (default is to keep WAL
inside the data directory)
.. versionadded:: 2019.2.0
checksums
If True, the cluster will be created with data page checksums.
.. note:: Data page checksums are supported since PostgreSQL 9.3.
.. versionadded:: 2019.2.0
runas
The system user the operation should be performed on behalf of | Below is the instruction that describes the task:
### Input:
Initialize the PostgreSQL data directory
name
The name of the directory to initialize
user
The database superuser name
password
The password to set for the postgres user
auth
The default authentication method for local connections
encoding
The default encoding for new databases
locale
The default locale for new databases
waldir
The transaction log (WAL) directory (default is to keep WAL
inside the data directory)
.. versionadded:: 2019.2.0
checksums
If True, the cluster will be created with data page checksums.
.. note:: Data page checksums are supported since PostgreSQL 9.3.
.. versionadded:: 2019.2.0
runas
The system user the operation should be performed on behalf of
### Response:
def present(name,
user=None,
password=None,
auth='password',
encoding='UTF8',
locale=None,
runas=None,
waldir=None,
checksums=False):
'''
Initialize the PostgreSQL data directory
name
The name of the directory to initialize
user
The database superuser name
password
The password to set for the postgres user
auth
The default authentication method for local connections
encoding
The default encoding for new databases
locale
The default locale for new databases
waldir
The transaction log (WAL) directory (default is to keep WAL
inside the data directory)
.. versionadded:: 2019.2.0
checksums
If True, the cluster will be created with data page checksums.
.. note:: Data page checksums are supported since PostgreSQL 9.3.
.. versionadded:: 2019.2.0
runas
The system user the operation should be performed on behalf of
'''
_cmt = 'Postgres data directory {0} is already present'.format(name)
ret = {
'name': name,
'changes': {},
'result': True,
'comment': _cmt}
if not __salt__['postgres.datadir_exists'](name=name):
if __opts__['test']:
ret['result'] = None
_cmt = 'Postgres data directory {0} is set to be initialized'\
.format(name)
ret['comment'] = _cmt
return ret
kwargs = dict(
user=user,
password=password,
auth=auth,
encoding=encoding,
locale=locale,
waldir=waldir,
checksums=checksums,
runas=runas)
if __salt__['postgres.datadir_init'](name, **kwargs):
_cmt = 'Postgres data directory {0} has been initialized'\
.format(name)
ret['comment'] = _cmt
ret['changes'][name] = 'Present'
else:
_cmt = 'Postgres data directory {0} initialization failed'\
.format(name)
ret['result'] = False
ret['comment'] = _cmt
return ret |
def resolve_type_spec(self, name, lineno):
"""Finds and links the TypeSpec with the given name."""
if name in self.type_specs:
return self.type_specs[name].link(self)
if '.' in name:
include_name, component = name.split('.', 1)
if include_name in self.included_scopes:
return self.included_scopes[include_name].resolve_type_spec(
component, lineno
)
raise ThriftCompilerError(
'Unknown type "%s" referenced at line %d%s' % (
name, lineno, self.__in_path()
)
) | Finds and links the TypeSpec with the given name. | Below is the instruction that describes the task:
### Input:
Finds and links the TypeSpec with the given name.
### Response:
def resolve_type_spec(self, name, lineno):
"""Finds and links the TypeSpec with the given name."""
if name in self.type_specs:
return self.type_specs[name].link(self)
if '.' in name:
include_name, component = name.split('.', 1)
if include_name in self.included_scopes:
return self.included_scopes[include_name].resolve_type_spec(
component, lineno
)
raise ThriftCompilerError(
'Unknown type "%s" referenced at line %d%s' % (
name, lineno, self.__in_path()
)
) |
def wait(self, pattern, timeout=10.0, safe=False, **match_kwargs):
"""Wait till pattern is found or time is out (default: 10s)."""
t = time.time() + timeout
while time.time() < t:
ret = self.exists(pattern, **match_kwargs)
if ret:
return ret
time.sleep(0.2)
if not safe:
raise errors.ImageNotFoundError('Not found image %s' %(pattern,)) | Wait till pattern is found or time is out (default: 10s). | Below is the instruction that describes the task:
### Input:
Wait till pattern is found or time is out (default: 10s).
### Response:
def wait(self, pattern, timeout=10.0, safe=False, **match_kwargs):
"""Wait till pattern is found or time is out (default: 10s)."""
t = time.time() + timeout
while time.time() < t:
ret = self.exists(pattern, **match_kwargs)
if ret:
return ret
time.sleep(0.2)
if not safe:
raise errors.ImageNotFoundError('Not found image %s' %(pattern,)) |
def arr_to_vector(arr):
"""Reshape a multidimensional array to a vector.
"""
dim = array_dim(arr)
tmp_arr = []
for n in range(len(dim) - 1):
for inner in arr:
for i in inner:
tmp_arr.append(i)
arr = tmp_arr
tmp_arr = []
return arr | Reshape a multidimensional array to a vector. | Below is the instruction that describes the task:
### Input:
Reshape a multidimensional array to a vector.
### Response:
def arr_to_vector(arr):
"""Reshape a multidimensional array to a vector.
"""
dim = array_dim(arr)
tmp_arr = []
for n in range(len(dim) - 1):
for inner in arr:
for i in inner:
tmp_arr.append(i)
arr = tmp_arr
tmp_arr = []
return arr |
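The routine above depends on an array_dim helper that is not shown here; a standalone sketch of the intended flattening on a made-up nested list:

from itertools import chain

arr = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]    # 2 x 2 x 2 nested list
flat = arr
while flat and isinstance(flat[0], list):      # peel one level of nesting per pass
    flat = list(chain.from_iterable(flat))
print(flat)                                    # -> [1, 2, 3, 4, 5, 6, 7, 8]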
def process_wait(process, timeout=0):
"""
Pauses script execution until a given process exists.
:param process:
:param timeout:
:return:
"""
ret = AUTO_IT.AU3_ProcessWait(LPCWSTR(process), INT(timeout))
return ret | Pauses script execution until a given process exists.
:param process:
:param timeout:
:return: | Below is the instruction that describes the task:
### Input:
Pauses script execution until a given process exists.
:param process:
:param timeout:
:return:
### Response:
def process_wait(process, timeout=0):
"""
Pauses script execution until a given process exists.
:param process:
:param timeout:
:return:
"""
ret = AUTO_IT.AU3_ProcessWait(LPCWSTR(process), INT(timeout))
return ret |
def create_rc(self, manifest_filename, namespace="default"):
"""
Creates an RC based on a manifest.
:Parameters:
- `manifest_filename`: The manifest file containing the ReplicationController definition, for example: `manifest/nginx-webserver-rc.yaml`
- `namespace`: In which namespace the RC should be created, defaults to, well, `default`
"""
rc_manifest, rc_manifest_json = util.load_yaml(filename=manifest_filename)
logging.debug("%s" %(rc_manifest_json))
create_rc_path = "".join(["/api/v1/namespaces/", namespace, "/replicationcontrollers"])
res = self.execute_operation(method="POST", ops_path=create_rc_path, payload=rc_manifest_json)
try:
rc_url = res.json()["metadata"]["selfLink"]
except KeyError:
raise ResourceCRUDException("".join(["Sorry, can not create the RC: ", rc_manifest["metadata"]["name"], ". Maybe it exists already?"]))
logging.info("From %s I created the RC %s at %s" %(manifest_filename, rc_manifest["metadata"]["name"], rc_url))
return (res, rc_url) | Creates an RC based on a manifest.
:Parameters:
- `manifest_filename`: The manifest file containing the ReplicationController definition, for example: `manifest/nginx-webserver-rc.yaml`
- `namespace`: In which namespace the RC should be created, defaults to, well, `default` | Below is the instruction that describes the task:
### Input:
Creates an RC based on a manifest.
:Parameters:
- `manifest_filename`: The manifest file containing the ReplicationController definition, for example: `manifest/nginx-webserver-rc.yaml`
- `namespace`: In which namespace the RC should be created, defaults to, well, `default`
### Response:
def create_rc(self, manifest_filename, namespace="default"):
"""
Creates an RC based on a manifest.
:Parameters:
- `manifest_filename`: The manifest file containing the ReplicationController definition, for example: `manifest/nginx-webserver-rc.yaml`
- `namespace`: In which namespace the RC should be created, defaults to, well, `default`
"""
rc_manifest, rc_manifest_json = util.load_yaml(filename=manifest_filename)
logging.debug("%s" %(rc_manifest_json))
create_rc_path = "".join(["/api/v1/namespaces/", namespace, "/replicationcontrollers"])
res = self.execute_operation(method="POST", ops_path=create_rc_path, payload=rc_manifest_json)
try:
rc_url = res.json()["metadata"]["selfLink"]
except KeyError:
raise ResourceCRUDException("".join(["Sorry, can not create the RC: ", rc_manifest["metadata"]["name"], ". Maybe it exists already?"]))
logging.info("From %s I created the RC %s at %s" %(manifest_filename, rc_manifest["metadata"]["name"], rc_url))
return (res, rc_url) |
def curlrequests(curl_string, **kwargs):
"""Use tPool to request for curl string.
If kwargs contains the req which hasattr request method, like req=requests.
:param curl_string: standard curl string.
:type curl_string: str
:param kwargs: valid kwargs for tPool.
:type curl_string: dict
Basic Usage::
from torequests.utils import curlrequests
r = curlrequests('''curl 'http://p.3.cn/' -H 'Connection: keep-alive' -H 'Cache-Control: max-age=0' -H 'Upgrade-Insecure-Requests: 1' -H 'User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36' -H 'DNT: 1' -H 'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8' -H 'Accept-Encoding: gzip, deflate' -H 'Accept-Language: zh-CN,zh;q=0.9,en;q=0.8' -H 'If-None-Match: "55dd9090-264"' -H 'If-Modified-Since: Wed, 26 Aug 2015 10:10:24 GMT' --compressed''', retry=1)
print(r.text)
"""
req = kwargs.pop('req', tPool())
kwargs.update(curlparse(curl_string))
return req.request(**kwargs) | Use tPool to request for curl string.
If kwargs contains the req which hasattr request method, like req=requests.
:param curl_string: standard curl string.
:type curl_string: str
:param kwargs: valid kwargs for tPool.
:type curl_string: dict
Basic Usage::
from torequests.utils import curlrequests
r = curlrequests('''curl 'http://p.3.cn/' -H 'Connection: keep-alive' -H 'Cache-Control: max-age=0' -H 'Upgrade-Insecure-Requests: 1' -H 'User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36' -H 'DNT: 1' -H 'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8' -H 'Accept-Encoding: gzip, deflate' -H 'Accept-Language: zh-CN,zh;q=0.9,en;q=0.8' -H 'If-None-Match: "55dd9090-264"' -H 'If-Modified-Since: Wed, 26 Aug 2015 10:10:24 GMT' --compressed''', retry=1)
print(r.text) | Below is the instruction that describes the task:
### Input:
Use tPool to request for curl string.
If kwargs contains the req which hasattr request method, like req=requests.
:param curl_string: standard curl string.
:type curl_string: str
:param kwargs: valid kwargs for tPool.
:type curl_string: dict
Basic Usage::
from torequests.utils import curlrequests
r = curlrequests('''curl 'http://p.3.cn/' -H 'Connection: keep-alive' -H 'Cache-Control: max-age=0' -H 'Upgrade-Insecure-Requests: 1' -H 'User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36' -H 'DNT: 1' -H 'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8' -H 'Accept-Encoding: gzip, deflate' -H 'Accept-Language: zh-CN,zh;q=0.9,en;q=0.8' -H 'If-None-Match: "55dd9090-264"' -H 'If-Modified-Since: Wed, 26 Aug 2015 10:10:24 GMT' --compressed''', retry=1)
print(r.text)
### Response:
def curlrequests(curl_string, **kwargs):
"""Use tPool to request for curl string.
If kwargs contains the req which hasattr request method, like req=requests.
:param curl_string: standard curl string.
:type curl_string: str
:param kwargs: valid kwargs for tPool.
:type curl_string: dict
Basic Usage::
from torequests.utils import curlrequests
r = curlrequests('''curl 'http://p.3.cn/' -H 'Connection: keep-alive' -H 'Cache-Control: max-age=0' -H 'Upgrade-Insecure-Requests: 1' -H 'User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36' -H 'DNT: 1' -H 'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8' -H 'Accept-Encoding: gzip, deflate' -H 'Accept-Language: zh-CN,zh;q=0.9,en;q=0.8' -H 'If-None-Match: "55dd9090-264"' -H 'If-Modified-Since: Wed, 26 Aug 2015 10:10:24 GMT' --compressed''', retry=1)
print(r.text)
"""
req = kwargs.pop('req', tPool())
kwargs.update(curlparse(curl_string))
return req.request(**kwargs) |
def list_files(tag=None, sat_id=None, data_path=None, format_str=None):
"""Return a Pandas Series of every file for chosen satellite data
Parameters
-----------
tag : (string or NoneType)
Denotes type of file to load. Accepted types are '1min' and '5min'.
(default=None)
sat_id : (string or NoneType)
Specifies the satellite ID for a constellation. Not used.
(default=None)
data_path : (string or NoneType)
Path to data directory. If None is specified, the value previously
set in Instrument.files.data_path is used. (default=None)
format_str : (string or NoneType)
User specified file format. If None is specified, the default
formats associated with the supplied tags are used. (default=None)
Returns
--------
pysat.Files.from_os : (pysat._files.Files)
A class containing the verified available files
"""
if format_str is None and data_path is not None:
if (tag == '1min') | (tag == '5min'):
min_fmt = ''.join(['omni_hro_', tag,
'{year:4d}{month:02d}{day:02d}_v01.cdf'])
files = pysat.Files.from_os(data_path=data_path, format_str=min_fmt)
# files are by month, just add date to monthly filename for
# each day of the month. load routine will use date to select out
# appropriate data
if not files.empty:
files.ix[files.index[-1] + pds.DateOffset(months=1) -
pds.DateOffset(days=1)] = files.iloc[-1]
files = files.asfreq('D', 'pad')
# add the date to the filename
files = files + '_' + files.index.strftime('%Y-%m-%d')
return files
else:
raise ValueError('Unknown tag')
elif format_str is None:
estr = 'A directory must be passed to the loading routine for OMNI HRO'
raise ValueError (estr)
else:
return pysat.Files.from_os(data_path=data_path, format_str=format_str) | Return a Pandas Series of every file for chosen satellite data
Parameters
-----------
tag : (string or NoneType)
Denotes type of file to load. Accepted types are '1min' and '5min'.
(default=None)
sat_id : (string or NoneType)
Specifies the satellite ID for a constellation. Not used.
(default=None)
data_path : (string or NoneType)
Path to data directory. If None is specified, the value previously
set in Instrument.files.data_path is used. (default=None)
format_str : (string or NoneType)
User specified file format. If None is specified, the default
formats associated with the supplied tags are used. (default=None)
Returns
--------
pysat.Files.from_os : (pysat._files.Files)
A class containing the verified available files | Below is the instruction that describes the task:
### Input:
Return a Pandas Series of every file for chosen satellite data
Parameters
-----------
tag : (string or NoneType)
Denotes type of file to load. Accepted types are '1min' and '5min'.
(default=None)
sat_id : (string or NoneType)
Specifies the satellite ID for a constellation. Not used.
(default=None)
data_path : (string or NoneType)
Path to data directory. If None is specified, the value previously
set in Instrument.files.data_path is used. (default=None)
format_str : (string or NoneType)
User specified file format. If None is specified, the default
formats associated with the supplied tags are used. (default=None)
Returns
--------
pysat.Files.from_os : (pysat._files.Files)
A class containing the verified available files
### Response:
def list_files(tag=None, sat_id=None, data_path=None, format_str=None):
"""Return a Pandas Series of every file for chosen satellite data
Parameters
-----------
tag : (string or NoneType)
Denotes type of file to load. Accepted types are '1min' and '5min'.
(default=None)
sat_id : (string or NoneType)
Specifies the satellite ID for a constellation. Not used.
(default=None)
data_path : (string or NoneType)
Path to data directory. If None is specified, the value previously
set in Instrument.files.data_path is used. (default=None)
format_str : (string or NoneType)
User specified file format. If None is specified, the default
formats associated with the supplied tags are used. (default=None)
Returns
--------
pysat.Files.from_os : (pysat._files.Files)
A class containing the verified available files
"""
if format_str is None and data_path is not None:
if (tag == '1min') | (tag == '5min'):
min_fmt = ''.join(['omni_hro_', tag,
'{year:4d}{month:02d}{day:02d}_v01.cdf'])
files = pysat.Files.from_os(data_path=data_path, format_str=min_fmt)
# files are by month, just add date to monthly filename for
# each day of the month. load routine will use date to select out
# appropriate data
if not files.empty:
files.ix[files.index[-1] + pds.DateOffset(months=1) -
pds.DateOffset(days=1)] = files.iloc[-1]
files = files.asfreq('D', 'pad')
# add the date to the filename
files = files + '_' + files.index.strftime('%Y-%m-%d')
return files
else:
raise ValueError('Unknown tag')
elif format_str is None:
estr = 'A directory must be passed to the loading routine for OMNI HRO'
raise ValueError (estr)
else:
return pysat.Files.from_os(data_path=data_path, format_str=format_str) |
def padded_sequence_accuracy(logits, labels):
"""Percentage of times that predictions matches labels everywhere (non-0)."""
with tf.variable_scope("padded_sequence_accuracy", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
weights = tf.to_float(tf.not_equal(labels, 0))
outputs = tf.to_int32(tf.argmax(logits, axis=-1))
padded_labels = tf.to_int32(labels)
not_correct = tf.to_float(tf.not_equal(outputs, padded_labels)) * weights
axis = list(range(1, len(outputs.get_shape())))
correct_seq = 1.0 - tf.minimum(1.0, tf.reduce_sum(not_correct, axis=axis))
return correct_seq, tf.constant(1.0) | Percentage of times that predictions match labels everywhere (non-0). | Below is the instruction that describes the task:
### Input:
Percentage of times that predictions match labels everywhere (non-0).
### Response:
def padded_sequence_accuracy(logits, labels):
"""Percentage of times that predictions matches labels everywhere (non-0)."""
with tf.variable_scope("padded_sequence_accuracy", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
weights = tf.to_float(tf.not_equal(labels, 0))
outputs = tf.to_int32(tf.argmax(logits, axis=-1))
padded_labels = tf.to_int32(labels)
not_correct = tf.to_float(tf.not_equal(outputs, padded_labels)) * weights
axis = list(range(1, len(outputs.get_shape())))
correct_seq = 1.0 - tf.minimum(1.0, tf.reduce_sum(not_correct, axis=axis))
return correct_seq, tf.constant(1.0) |
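A NumPy restatement of the same per-sequence metric on toy data (0 treated as the padding id), to show what the TensorFlow code computes:

import numpy as np

labels = np.array([[3, 5, 0], [2, 2, 4]])     # 0 is padding
outputs = np.array([[3, 5, 9], [2, 1, 4]])
weights = (labels != 0).astype(float)          # ignore padded positions
wrong = (outputs != labels).astype(float) * weights
correct_seq = 1.0 - np.minimum(1.0, wrong.sum(axis=1))
print(correct_seq)                             # -> [1. 0.]  only the first sequence matches everywhere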
def submit_ham(self, params):
"""For submitting a ham comment to Akismet."""
# Check required params for submit-ham
for required in ['blog', 'user_ip', 'user_agent']:
if required not in params:
raise MissingParams(required)
response = self._request('submit-ham', params)
if response.status == 200:
return response.read() == "true"
return False | For submitting a ham comment to Akismet. | Below is the instruction that describes the task:
### Input:
For submitting a ham comment to Akismet.
### Response:
def submit_ham(self, params):
"""For submitting a ham comment to Akismet."""
# Check required params for submit-ham
for required in ['blog', 'user_ip', 'user_agent']:
if required not in params:
raise MissingParams(required)
response = self._request('submit-ham', params)
if response.status is 200:
return response.read() == "true"
return False |
def _get_child_validator(self, document_crumb=None, schema_crumb=None, **kwargs):
""" Creates a new instance of Validator-(sub-)class. All initial
parameters of the parent are passed to the initialization, unless
a parameter is given as an explicit *keyword*-parameter.
:param document_crumb: Extends the
:attr:`~cerberus.Validator.document_path`
of the child-validator.
:type document_crumb: :class:`tuple` or :term:`hashable`
:param schema_crumb: Extends the
:attr:`~cerberus.Validator.schema_path`
of the child-validator.
:type schema_crumb: :class:`tuple` or hashable
:param kwargs: Overriding keyword-arguments for initialization.
:type kwargs: :class:`dict`
:return: an instance of ``self.__class__``
"""
child_config = self._config.copy()
child_config.update(kwargs)
if not self.is_child:
child_config['is_child'] = True
child_config['error_handler'] = toy_error_handler
child_config['root_allow_unknown'] = self.allow_unknown
child_config['root_require_all'] = self.require_all
child_config['root_document'] = self.document
child_config['root_schema'] = self.schema
child_validator = self.__class__(**child_config)
if document_crumb is None:
child_validator.document_path = self.document_path
else:
if not isinstance(document_crumb, tuple):
document_crumb = (document_crumb,)
child_validator.document_path = self.document_path + document_crumb
if schema_crumb is None:
child_validator.schema_path = self.schema_path
else:
if not isinstance(schema_crumb, tuple):
schema_crumb = (schema_crumb,)
child_validator.schema_path = self.schema_path + schema_crumb
return child_validator | Creates a new instance of Validator-(sub-)class. All initial
parameters of the parent are passed to the initialization, unless
a parameter is given as an explicit *keyword*-parameter.
:param document_crumb: Extends the
:attr:`~cerberus.Validator.document_path`
of the child-validator.
:type document_crumb: :class:`tuple` or :term:`hashable`
:param schema_crumb: Extends the
:attr:`~cerberus.Validator.schema_path`
of the child-validator.
:type schema_crumb: :class:`tuple` or hashable
:param kwargs: Overriding keyword-arguments for initialization.
:type kwargs: :class:`dict`
:return: an instance of ``self.__class__`` | Below is the instruction that describes the task:
### Input:
Creates a new instance of Validator-(sub-)class. All initial
parameters of the parent are passed to the initialization, unless
a parameter is given as an explicit *keyword*-parameter.
:param document_crumb: Extends the
:attr:`~cerberus.Validator.document_path`
of the child-validator.
:type document_crumb: :class:`tuple` or :term:`hashable`
:param schema_crumb: Extends the
:attr:`~cerberus.Validator.schema_path`
of the child-validator.
:type schema_crumb: :class:`tuple` or hashable
:param kwargs: Overriding keyword-arguments for initialization.
:type kwargs: :class:`dict`
:return: an instance of ``self.__class__``
### Response:
def _get_child_validator(self, document_crumb=None, schema_crumb=None, **kwargs):
""" Creates a new instance of Validator-(sub-)class. All initial
parameters of the parent are passed to the initialization, unless
a parameter is given as an explicit *keyword*-parameter.
:param document_crumb: Extends the
:attr:`~cerberus.Validator.document_path`
of the child-validator.
:type document_crumb: :class:`tuple` or :term:`hashable`
:param schema_crumb: Extends the
:attr:`~cerberus.Validator.schema_path`
of the child-validator.
:type schema_crumb: :class:`tuple` or hashable
:param kwargs: Overriding keyword-arguments for initialization.
:type kwargs: :class:`dict`
:return: an instance of ``self.__class__``
"""
child_config = self._config.copy()
child_config.update(kwargs)
if not self.is_child:
child_config['is_child'] = True
child_config['error_handler'] = toy_error_handler
child_config['root_allow_unknown'] = self.allow_unknown
child_config['root_require_all'] = self.require_all
child_config['root_document'] = self.document
child_config['root_schema'] = self.schema
child_validator = self.__class__(**child_config)
if document_crumb is None:
child_validator.document_path = self.document_path
else:
if not isinstance(document_crumb, tuple):
document_crumb = (document_crumb,)
child_validator.document_path = self.document_path + document_crumb
if schema_crumb is None:
child_validator.schema_path = self.schema_path
else:
if not isinstance(schema_crumb, tuple):
schema_crumb = (schema_crumb,)
child_validator.schema_path = self.schema_path + schema_crumb
return child_validator |
def SetPyclassMetaclass(option, opt, value, parser, *args, **kwargs):
"""set up pyclass metaclass for complexTypes"""
from pyremotevbox.ZSI.generate.containers import ServiceHeaderContainer,\
TypecodeContainerBase, TypesHeaderContainer
TypecodeContainerBase.metaclass = kwargs['metaclass']
TypesHeaderContainer.imports.append(\
'from %(module)s import %(metaclass)s' %kwargs
)
ServiceHeaderContainer.imports.append(\
'from %(module)s import %(metaclass)s' %kwargs
) | set up pyclass metaclass for complexTypes | Below is the instruction that describes the task:
### Input:
set up pyclass metaclass for complexTypes
### Response:
def SetPyclassMetaclass(option, opt, value, parser, *args, **kwargs):
"""set up pyclass metaclass for complexTypes"""
from pyremotevbox.ZSI.generate.containers import ServiceHeaderContainer,\
TypecodeContainerBase, TypesHeaderContainer
TypecodeContainerBase.metaclass = kwargs['metaclass']
TypesHeaderContainer.imports.append(\
'from %(module)s import %(metaclass)s' %kwargs
)
ServiceHeaderContainer.imports.append(\
'from %(module)s import %(metaclass)s' %kwargs
) |
def null(self):
"""Zero crossing value."""
if not self.option.axis:
return -1
else:
return self.screen.height - (
-self.minimum * 4.0 / self.extents * self.size.y
) | Zero crossing value. | Below is the instruction that describes the task:
### Input:
Zero crossing value.
### Response:
def null(self):
"""Zero crossing value."""
if not self.option.axis:
return -1
else:
return self.screen.height - (
-self.minimum * 4.0 / self.extents * self.size.y
) |
def _get_bmdl_ratio(self, models):
"""Return BMDL ratio in list of models."""
bmdls = [model.output["BMDL"] for model in models if model.output["BMDL"] > 0]
return max(bmdls) / min(bmdls) if len(bmdls) > 0 else 0 | Return BMDL ratio in list of models. | Below is the instruction that describes the task:
### Input:
Return BMDL ratio in list of models.
### Response:
def _get_bmdl_ratio(self, models):
"""Return BMDL ratio in list of models."""
bmdls = [model.output["BMDL"] for model in models if model.output["BMDL"] > 0]
return max(bmdls) / min(bmdls) if len(bmdls) > 0 else 0 |
def mumps(self):
"""Call MUMPS, checking for errors in the return code.
The desired job should have already been set using `ctx.set_job(...)`.
As a convenience, you may wish to call `ctx.run(job=...)` which sets
the job and calls MUMPS.
"""
self._mumps_c(self.id)
if self.id.infog[0] < 0:
raise RuntimeError("MUMPS error: %d" % self.id.infog[0]) | Call MUMPS, checking for errors in the return code.
The desired job should have already been set using `ctx.set_job(...)`.
As a convenience, you may wish to call `ctx.run(job=...)` which sets
the job and calls MUMPS. | Below is the the instruction that describes the task:
### Input:
Call MUMPS, checking for errors in the return code.
The desired job should have already been set using `ctx.set_job(...)`.
As a convenience, you may wish to call `ctx.run(job=...)` which sets
the job and calls MUMPS.
### Response:
def mumps(self):
"""Call MUMPS, checking for errors in the return code.
The desired job should have already been set using `ctx.set_job(...)`.
As a convenience, you may wish to call `ctx.run(job=...)` which sets
the job and calls MUMPS.
"""
self._mumps_c(self.id)
if self.id.infog[0] < 0:
raise RuntimeError("MUMPS error: %d" % self.id.infog[0]) |
def ContainsIgnoreCase(self, value):
"""Sets the type of the WHERE clause as "contains ignore case".
Args:
value: The value to be used in the WHERE condition.
Returns:
The query builder that this WHERE builder links to.
"""
self._awql = self._CreateSingleValueCondition(value, 'CONTAINS_IGNORE_CASE')
return self._query_builder | Sets the type of the WHERE clause as "contains ignore case".
Args:
value: The value to be used in the WHERE condition.
Returns:
The query builder that this WHERE builder links to. | Below is the the instruction that describes the task:
### Input:
Sets the type of the WHERE clause as "contains ignore case".
Args:
value: The value to be used in the WHERE condition.
Returns:
The query builder that this WHERE builder links to.
### Response:
def ContainsIgnoreCase(self, value):
"""Sets the type of the WHERE clause as "contains ignore case".
Args:
value: The value to be used in the WHERE condition.
Returns:
The query builder that this WHERE builder links to.
"""
self._awql = self._CreateSingleValueCondition(value, 'CONTAINS_IGNORE_CASE')
return self._query_builder |
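In a googleads-style service query builder this call is chained off `Where(...)`; the builder object, field names, and limits below are assumptions for illustration.

# Hypothetical chained usage: the WHERE builder hands control back to the query
# builder, so the chain can continue with Limit and Build.
query = (builder
         .Select('Id', 'Name')
         .Where('Name').ContainsIgnoreCase('holiday sale')
         .Limit(0, 100)
         .Build())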
def write_static_networks(gtfs, output_dir, fmt=None):
"""
Parameters
----------
gtfs: gtfspy.GTFS
output_dir: (str, unicode)
a path where to write
fmt: None, optional
defaulting to "edg" and writing results as ".edg" files
If "csv" csv files are produced instead
"""
if fmt is None:
fmt = "edg"
single_layer_networks = stop_to_stop_networks_by_type(gtfs)
util.makedirs(output_dir)
for route_type, net in single_layer_networks.items():
tag = route_types.ROUTE_TYPE_TO_LOWERCASE_TAG[route_type]
file_name = os.path.join(output_dir, "network_" + tag + "." + fmt)
if len(net.edges()) > 0:
_write_stop_to_stop_network_edges(net, file_name, fmt=fmt) | Parameters
----------
gtfs: gtfspy.GTFS
output_dir: (str, unicode)
a path where to write
fmt: None, optional
defaulting to "edg" and writing results as ".edg" files
If "csv" csv files are produced instead | Below is the the instruction that describes the task:
### Input:
Parameters
----------
gtfs: gtfspy.GTFS
output_dir: (str, unicode)
a path where to write
fmt: None, optional
defaulting to "edg" and writing results as ".edg" files
If "csv" csv files are produced instead
### Response:
def write_static_networks(gtfs, output_dir, fmt=None):
"""
Parameters
----------
gtfs: gtfspy.GTFS
output_dir: (str, unicode)
a path where to write
fmt: None, optional
defaulting to "edg" and writing results as ".edg" files
If "csv" csv files are produced instead
"""
if fmt is None:
fmt = "edg"
single_layer_networks = stop_to_stop_networks_by_type(gtfs)
util.makedirs(output_dir)
for route_type, net in single_layer_networks.items():
tag = route_types.ROUTE_TYPE_TO_LOWERCASE_TAG[route_type]
file_name = os.path.join(output_dir, "network_" + tag + "." + fmt)
if len(net.edges()) > 0:
_write_stop_to_stop_network_edges(net, file_name, fmt=fmt) |
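A short usage sketch, assuming a gtfspy GTFS object built from an imported sqlite file; the paths are placeholders.

from gtfspy.gtfs import GTFS

# Writes one stop-to-stop edge file per route type (bus, tram, rail, ...) into
# out/networks, as CSV instead of the default .edg format.
g = GTFS("data/city.sqlite")          # placeholder path to an imported GTFS feed
write_static_networks(g, output_dir="out/networks", fmt="csv")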
def reinitialize_command(self, command, reinit_subcommands):
"""
Monkeypatch distutils.Distribution.reinitialize_command() to match behavior
of Distribution.get_command_obj()
This fixes a problem where 'pip install -e' does not reinitialise options
using the setup(options={...}) variable for the build_ext command.
    This also affects other option sources such as setup.cfg.
"""
cmd_obj = _DISTUTILS_REINIT(self, command, reinit_subcommands)
options = self.command_options.get(command)
if options:
self._set_command_options(cmd_obj, options)
return cmd_obj | Monkeypatch distutils.Distribution.reinitialize_command() to match behavior
of Distribution.get_command_obj()
This fixes a problem where 'pip install -e' does not reinitialise options
using the setup(options={...}) variable for the build_ext command.
    This also affects other option sources such as setup.cfg. | Below is the the instruction that describes the task:
### Input:
Monkeypatch distutils.Distribution.reinitialize_command() to match behavior
of Distribution.get_command_obj()
This fixes a problem where 'pip install -e' does not reinitialise options
using the setup(options={...}) variable for the build_ext command.
This also affects other option sources such as setup.cfg.
### Response:
def reinitialize_command(self, command, reinit_subcommands):
"""
Monkeypatch distutils.Distribution.reinitialize_command() to match behavior
of Distribution.get_command_obj()
This fixes a problem where 'pip install -e' does not reinitialise options
using the setup(options={...}) variable for the build_ext command.
    This also affects other option sources such as setup.cfg.
"""
cmd_obj = _DISTUTILS_REINIT(self, command, reinit_subcommands)
options = self.command_options.get(command)
if options:
self._set_command_options(cmd_obj, options)
return cmd_obj |
def _words_by_distinctiveness_score(vocab, topic_word_distrib, doc_topic_distrib, doc_lengths, n=None,
least_to_most=False):
"""Return words in `vocab` ordered by distinctiveness score."""
p_t = get_marginal_topic_distrib(doc_topic_distrib, doc_lengths)
distinct = get_word_distinctiveness(topic_word_distrib, p_t)
return _words_by_score(vocab, distinct, least_to_most=least_to_most, n=n) | Return words in `vocab` ordered by distinctiveness score. | Below is the the instruction that describes the task:
### Input:
Return words in `vocab` ordered by distinctiveness score.
### Response:
def _words_by_distinctiveness_score(vocab, topic_word_distrib, doc_topic_distrib, doc_lengths, n=None,
least_to_most=False):
"""Return words in `vocab` ordered by distinctiveness score."""
p_t = get_marginal_topic_distrib(doc_topic_distrib, doc_lengths)
distinct = get_word_distinctiveness(topic_word_distrib, p_t)
return _words_by_score(vocab, distinct, least_to_most=least_to_most, n=n) |
def get_encrypted_pin(self, clear_pin, card_number):
"""
Get PIN block in ISO 0 format, encrypted with the terminal key
"""
if not self.terminal_key:
print('Terminal key is not set')
return ''
if self.pinblock_format == '01':
try:
pinblock = bytes.fromhex(get_pinblock(clear_pin, card_number))
#print('PIN block: {}'.format(raw2str(pinblock)))
except TypeError:
return ''
encrypted_pinblock = self.tpk_cipher.encrypt(pinblock)
return raw2str(encrypted_pinblock)
else:
print('Unsupported PIN Block format')
return '' | Get PIN block in ISO 0 format, encrypted with the terminal key | Below is the the instruction that describes the task:
### Input:
Get PIN block in ISO 0 format, encrypted with the terminal key
### Response:
def get_encrypted_pin(self, clear_pin, card_number):
"""
Get PIN block in ISO 0 format, encrypted with the terminal key
"""
if not self.terminal_key:
print('Terminal key is not set')
return ''
if self.pinblock_format == '01':
try:
pinblock = bytes.fromhex(get_pinblock(clear_pin, card_number))
#print('PIN block: {}'.format(raw2str(pinblock)))
except TypeError:
return ''
encrypted_pinblock = self.tpk_cipher.encrypt(pinblock)
return raw2str(encrypted_pinblock)
else:
print('Unsupported PIN Block format')
return '' |
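A sketch of exercising the method once a terminal PIN key has been loaded; the surrounding class, its key-loading call, and the card number are assumptions.

# Hypothetical driver code for the host/terminal wrapper that owns this method.
term = Terminal()                                           # assumed wrapper class
term.set_terminal_key('DEADBEEFDEADBEEFDEADBEEFDEADBEEF')   # assumed TPK setter
pin_block = term.get_encrypted_pin('1234', '4000001234562000')
print(pin_block or 'PIN block could not be built')          # hex string, '' on error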
async def connect(self, channel_id: int):
""" Connects to a voice channel. """
ws = self._lavalink.bot._connection._get_websocket(int(self.guild_id))
await ws.voice_state(self.guild_id, str(channel_id)) | Connects to a voice channel. | Below is the the instruction that describes the task:
### Input:
Connects to a voice channel.
### Response:
async def connect(self, channel_id: int):
""" Connects to a voice channel. """
ws = self._lavalink.bot._connection._get_websocket(int(self.guild_id))
await ws.voice_state(self.guild_id, str(channel_id)) |
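A hedged sketch of invoking the coroutine from a discord.py command handler; `player` is assumed to be the lavalink player object that defines `connect()` above.

# Sketch: join the caller's current voice channel.
async def join(ctx, player):
    channel = ctx.author.voice.channel   # caller must already be in a voice channel
    await player.connect(channel.id)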
def find_stop(self, query, direction=""):
"""
Search the list of stops, optionally in a direction (inbound or outbound),
for the term passed to the function. Case insensitive, searches both the
        stop name and ID. Returns a list of matching stops.
Defaults to both directions.
"""
_directions = ["inbound", "outbound", ""]
direction = direction.lower()
if direction == "inbound":
stops = self.inbound_stops
elif direction == "outbound":
stops = self.outbound_stops
else:
stops = self.inbound_stops + self.outbound_stops
found = []
for stop in stops:
q = str(query).lower()
if q in stop.name.lower() or q in str(stop.id).lower():
found.append(stop)
return found | Search the list of stops, optionally in a direction (inbound or outbound),
for the term passed to the function. Case insensitive, searches both the
        stop name and ID. Returns a list of matching stops.
Defaults to both directions. | Below is the the instruction that describes the task:
### Input:
Search the list of stops, optionally in a direction (inbound or outbound),
for the term passed to the function. Case insensitive, searches both the
stop name and ID. Returns a list of matching stops.
Defaults to both directions.
### Response:
def find_stop(self, query, direction=""):
"""
Search the list of stops, optionally in a direction (inbound or outbound),
for the term passed to the function. Case insensitive, searches both the
    stop name and ID. Returns a list of matching stops.
Defaults to both directions.
"""
_directions = ["inbound", "outbound", ""]
direction = direction.lower()
if direction == "inbound":
stops = self.inbound_stops
elif direction == "outbound":
stops = self.outbound_stops
else:
stops = self.inbound_stops + self.outbound_stops
found = []
for stop in stops:
q = str(query).lower()
if q in stop.name.lower() or q in str(stop.id).lower():
found.append(stop)
return found |
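A usage sketch assuming `route` is an instance of the class this method belongs to, with its inbound and outbound stop lists already populated.

# Case-insensitive match against stop names and IDs; direction is optional.
matches = route.find_stop("market", direction="Inbound")
for stop in matches:
    print(stop.id, stop.name)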
def creation_date(path_to_file, return_datetime=True):
"""
Retrieve a file's creation date.
Try to get the date that a file was created, falling back to when it was
last modified if that isn't possible.
See http://stackoverflow.com/a/39501288/1709587 for explanation.
:param path_to_file: File path
:param return_datetime: Bool, returns value in Datetime format
:return: Creation date
"""
if platform.system() == 'Windows':
created_at = os.path.getctime(path_to_file)
else:
stat = os.stat(path_to_file)
try:
created_at = stat.st_birthtime
except AttributeError:
# We're probably on Linux. No easy way to get creation dates here,
# so we'll settle for when its content was last modified.
created_at = stat.st_mtime
if return_datetime:
return datetime.fromtimestamp(created_at)
else:
return created_at | Retrieve a file's creation date.
Try to get the date that a file was created, falling back to when it was
last modified if that isn't possible.
See http://stackoverflow.com/a/39501288/1709587 for explanation.
:param path_to_file: File path
:param return_datetime: Bool, returns value in Datetime format
:return: Creation date | Below is the the instruction that describes the task:
### Input:
Retrieve a file's creation date.
Try to get the date that a file was created, falling back to when it was
last modified if that isn't possible.
See http://stackoverflow.com/a/39501288/1709587 for explanation.
:param path_to_file: File path
:param return_datetime: Bool, returns value in Datetime format
:return: Creation date
### Response:
def creation_date(path_to_file, return_datetime=True):
"""
Retrieve a file's creation date.
Try to get the date that a file was created, falling back to when it was
last modified if that isn't possible.
See http://stackoverflow.com/a/39501288/1709587 for explanation.
:param path_to_file: File path
:param return_datetime: Bool, returns value in Datetime format
:return: Creation date
"""
if platform.system() == 'Windows':
created_at = os.path.getctime(path_to_file)
else:
stat = os.stat(path_to_file)
try:
created_at = stat.st_birthtime
except AttributeError:
# We're probably on Linux. No easy way to get creation dates here,
# so we'll settle for when its content was last modified.
created_at = stat.st_mtime
if return_datetime:
return datetime.fromtimestamp(created_at)
else:
return created_at |
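A quick self-contained check of the helper using a temporary file:

import os
import tempfile

fd, path = tempfile.mkstemp()
os.close(fd)
print(creation_date(path))                         # datetime of creation (or mtime)
print(creation_date(path, return_datetime=False))  # raw POSIX timestamp
os.remove(path)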
def run(self, path_or_tests, dot_env_path=None, mapping=None):
""" main interface.
Args:
path_or_tests:
                str: testcase/testsuite file/folder path
dict: valid testcase/testsuite data
"""
if validator.is_testcase_path(path_or_tests):
return self.run_path(path_or_tests, dot_env_path, mapping)
elif validator.is_testcases(path_or_tests):
return self.run_tests(path_or_tests)
else:
raise exceptions.ParamsError("Invalid testcase path or testcases: {}".format(path_or_tests)) | main interface.
Args:
path_or_tests:
                str: testcase/testsuite file/folder path
dict: valid testcase/testsuite data | Below is the the instruction that describes the task:
### Input:
main interface.
Args:
path_or_tests:
        str: testcase/testsuite file/folder path
dict: valid testcase/testsuite data
### Response:
def run(self, path_or_tests, dot_env_path=None, mapping=None):
""" main interface.
Args:
path_or_tests:
            str: testcase/testsuite file/folder path
dict: valid testcase/testsuite data
"""
if validator.is_testcase_path(path_or_tests):
return self.run_path(path_or_tests, dot_env_path, mapping)
elif validator.is_testcases(path_or_tests):
return self.run_tests(path_or_tests)
else:
raise exceptions.ParamsError("Invalid testcase path or testcases: {}".format(path_or_tests)) |
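A hedged sketch against the runner class implied above; the class name, YAML path, and testcase structure are assumptions.

runner = HttpRunner()                               # assumed runner class
summary = runner.run("testcases/demo.yml",          # file or folder path
                     dot_env_path=".env")
# A list of already-loaded testcase dicts can be passed instead of a path:
summary = runner.run([{"config": {"name": "demo"}, "teststeps": []}])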
def _get_modpkg_path(dotted_name, pathlist=None):
"""Get the filesystem path for a module or a package.
Return the file system path to a file for a module, and to a directory for
a package. Return None if the name is not found, or is a builtin or
extension module.
"""
# split off top-most name
parts = dotted_name.split('.', 1)
if len(parts) > 1:
# we have a dotted path, import top-level package
try:
file, pathname, description = imp.find_module(parts[0], pathlist)
if file: file.close()
except ImportError:
return None
# check if it's indeed a package
if description[2] == imp.PKG_DIRECTORY:
# recursively handle the remaining name parts
pathname = _get_modpkg_path(parts[1], [pathname])
else:
pathname = None
else:
# plain name
try:
file, pathname, description = imp.find_module(
dotted_name, pathlist)
if file:
file.close()
if description[2] not in [imp.PY_SOURCE, imp.PKG_DIRECTORY]:
pathname = None
except ImportError:
pathname = None
return pathname | Get the filesystem path for a module or a package.
Return the file system path to a file for a module, and to a directory for
a package. Return None if the name is not found, or is a builtin or
extension module. | Below is the the instruction that describes the task:
### Input:
Get the filesystem path for a module or a package.
Return the file system path to a file for a module, and to a directory for
a package. Return None if the name is not found, or is a builtin or
extension module.
### Response:
def _get_modpkg_path(dotted_name, pathlist=None):
"""Get the filesystem path for a module or a package.
Return the file system path to a file for a module, and to a directory for
a package. Return None if the name is not found, or is a builtin or
extension module.
"""
# split off top-most name
parts = dotted_name.split('.', 1)
if len(parts) > 1:
# we have a dotted path, import top-level package
try:
file, pathname, description = imp.find_module(parts[0], pathlist)
if file: file.close()
except ImportError:
return None
# check if it's indeed a package
if description[2] == imp.PKG_DIRECTORY:
# recursively handle the remaining name parts
pathname = _get_modpkg_path(parts[1], [pathname])
else:
pathname = None
else:
# plain name
try:
file, pathname, description = imp.find_module(
dotted_name, pathlist)
if file:
file.close()
if description[2] not in [imp.PY_SOURCE, imp.PKG_DIRECTORY]:
pathname = None
except ImportError:
pathname = None
return pathname |
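The helper resolves dotted names through the long-deprecated imp machinery; a minimal check of the three outcomes described in the docstring:

print(_get_modpkg_path('xml.dom'))  # package -> directory path
print(_get_modpkg_path('os'))       # plain module -> .../os.py
print(_get_modpkg_path('sys'))      # builtin -> None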
def printMe(self, selfKey, selfValue):
'''Parse the single and its value and return the parsed str.
Args:
selfTag (str): The tag. Normally just ``self.tag``
selfValue (list): a list of value elements(single, subclasses, str, int). Normally just ``self.value``
Returns:
str: A parsed text
'''
text = '<key>{keyName}</key>\n'.format(keyName=selfKey)
if len(selfValue) == 0:
return ''
else:
valueText = ''
for element in selfValue:
if singleOrPair(element) == 'Single':
valueText += element.printMe(element.tag, element.value)
elif singleOrPair(element) == 'Pair':
valueText += element.printMe(element.key, element.value)
                # maybe an else statement for non single non pair?
text += valueText
return text | Parse the single and its value and return the parsed str.
Args:
selfTag (str): The tag. Normally just ``self.tag``
selfValue (list): a list of value elements(single, subclasses, str, int). Normally just ``self.value``
Returns:
str: A parsed text | Below is the the instruction that describes the task:
### Input:
Parse the single and its value and return the parsed str.
Args:
selfTag (str): The tag. Normally just ``self.tag``
selfValue (list): a list of value elements(single, subclasses, str, int). Normally just ``self.value``
Returns:
str: A parsed text
### Response:
def printMe(self, selfKey, selfValue):
'''Parse the single and its value and return the parsed str.
Args:
selfTag (str): The tag. Normally just ``self.tag``
selfValue (list): a list of value elements(single, subclasses, str, int). Normally just ``self.value``
Returns:
str: A parsed text
'''
text = '<key>{keyName}</key>\n'.format(keyName=selfKey)
if len(selfValue) == 0:
return ''
else:
valueText = ''
for element in selfValue:
if singleOrPair(element) == 'Single':
valueText += element.printMe(element.tag, element.value)
elif singleOrPair(element) == 'Pair':
valueText += element.printMe(element.key, element.value)
            # maybe an else statement for non single non pair?
text += valueText
return text |
def _is_end(event, node, tagName): # pylint: disable=invalid-name
"""Return true if (event, node) is an end event for tagname."""
return event == pulldom.END_ELEMENT and node.tagName == tagName | Return true if (event, node) is an end event for tagname. | Below is the the instruction that describes the task:
### Input:
Return true if (event, node) is an end event for tagname.
### Response:
def _is_end(event, node, tagName): # pylint: disable=invalid-name
"""Return true if (event, node) is an end event for tagname."""
return event == pulldom.END_ELEMENT and node.tagName == tagName |
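A small pulldom loop showing the predicate in use; the XML snippet and tag name are illustrative.

from xml.dom import pulldom

doc = pulldom.parseString("<books><book>Dune</book></books>")
for event, node in doc:
    # Text and start events are skipped by the short-circuit on the event type.
    if _is_end(event, node, "book"):
        print("finished one book element")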
def get_snapshot(self, snapshot_id_or_uri, volume_id_or_uri=None):
"""
Gets a snapshot of a volume.
Args:
volume_id_or_uri:
Can be either the volume ID or the volume URI. It is optional if it is passed a snapshot URI,
but required if it passed a snapshot ID.
snapshot_id_or_uri:
Can be either the snapshot ID or the snapshot URI.
Returns:
dict: The snapshot.
"""
uri = self.__build_volume_snapshot_uri(volume_id_or_uri, snapshot_id_or_uri)
return self._client.get(uri) | Gets a snapshot of a volume.
Args:
volume_id_or_uri:
Can be either the volume ID or the volume URI. It is optional if it is passed a snapshot URI,
but required if it passed a snapshot ID.
snapshot_id_or_uri:
Can be either the snapshot ID or the snapshot URI.
Returns:
dict: The snapshot. | Below is the the instruction that describes the task:
### Input:
Gets a snapshot of a volume.
Args:
volume_id_or_uri:
Can be either the volume ID or the volume URI. It is optional if it is passed a snapshot URI,
but required if it passed a snapshot ID.
snapshot_id_or_uri:
Can be either the snapshot ID or the snapshot URI.
Returns:
dict: The snapshot.
### Response:
def get_snapshot(self, snapshot_id_or_uri, volume_id_or_uri=None):
"""
Gets a snapshot of a volume.
Args:
volume_id_or_uri:
Can be either the volume ID or the volume URI. It is optional if it is passed a snapshot URI,
but required if it passed a snapshot ID.
snapshot_id_or_uri:
Can be either the snapshot ID or the snapshot URI.
Returns:
dict: The snapshot.
"""
uri = self.__build_volume_snapshot_uri(volume_id_or_uri, snapshot_id_or_uri)
return self._client.get(uri) |
def _sparse_tensor_dense_matmul(sp_a, b, **kwargs):
"""Returns (batched) matmul of a SparseTensor with a Tensor.
Args:
sp_a: `SparseTensor` representing a (batch of) matrices.
b: `Tensor` representing a (batch of) matrices, with the same batch shape of
`sp_a`. The shape must be compatible with the shape of `sp_a` and kwargs.
**kwargs: Keyword arguments to `tf.sparse_tensor_dense_matmul`.
Returns:
product: A dense (batch of) matrix-shaped Tensor of the same batch shape and
dtype as `sp_a` and `b`. If `sp_a` or `b` is adjointed through `kwargs` then
the shape is adjusted accordingly.
"""
batch_shape = _get_shape(sp_a)[:-2]
# Reshape the SparseTensor into a rank 3 SparseTensors, with the
# batch shape flattened to a single dimension. If the batch rank is 0, then
# we add a batch dimension of rank 1.
sp_a = tf.sparse.reshape(sp_a, tf.concat([[-1], _get_shape(sp_a)[-2:]],
axis=0))
# Reshape b to stack the batch dimension along the rows.
b = tf.reshape(b, tf.concat([[-1], _get_shape(b)[-1:]], axis=0))
# Convert the SparseTensor to a matrix in block diagonal form with blocks of
  # matrices [M, N]. This allows us to use tf.sparse_tensor_dense_matmul which
# only accepts rank 2 (Sparse)Tensors.
out = tf.sparse.sparse_dense_matmul(_sparse_block_diag(sp_a), b, **kwargs)
# Finally retrieve the original batch shape from the resulting rank 2 Tensor.
# Note that we avoid inferring the final shape from `sp_a` or `b` because we
# might have transposed one or both of them.
return tf.reshape(
out,
tf.concat([batch_shape, [-1], _get_shape(out)[-1:]], axis=0)) | Returns (batched) matmul of a SparseTensor with a Tensor.
Args:
sp_a: `SparseTensor` representing a (batch of) matrices.
b: `Tensor` representing a (batch of) matrices, with the same batch shape of
`sp_a`. The shape must be compatible with the shape of `sp_a` and kwargs.
**kwargs: Keyword arguments to `tf.sparse_tensor_dense_matmul`.
Returns:
product: A dense (batch of) matrix-shaped Tensor of the same batch shape and
dtype as `sp_a` and `b`. If `sp_a` or `b` is adjointed through `kwargs` then
the shape is adjusted accordingly. | Below is the the instruction that describes the task:
### Input:
Returns (batched) matmul of a SparseTensor with a Tensor.
Args:
sp_a: `SparseTensor` representing a (batch of) matrices.
b: `Tensor` representing a (batch of) matrices, with the same batch shape of
`sp_a`. The shape must be compatible with the shape of `sp_a` and kwargs.
**kwargs: Keyword arguments to `tf.sparse_tensor_dense_matmul`.
Returns:
product: A dense (batch of) matrix-shaped Tensor of the same batch shape and
dtype as `sp_a` and `b`. If `sp_a` or `b` is adjointed through `kwargs` then
the shape is adjusted accordingly.
### Response:
def _sparse_tensor_dense_matmul(sp_a, b, **kwargs):
"""Returns (batched) matmul of a SparseTensor with a Tensor.
Args:
sp_a: `SparseTensor` representing a (batch of) matrices.
b: `Tensor` representing a (batch of) matrices, with the same batch shape of
`sp_a`. The shape must be compatible with the shape of `sp_a` and kwargs.
**kwargs: Keyword arguments to `tf.sparse_tensor_dense_matmul`.
Returns:
product: A dense (batch of) matrix-shaped Tensor of the same batch shape and
dtype as `sp_a` and `b`. If `sp_a` or `b` is adjointed through `kwargs` then
the shape is adjusted accordingly.
"""
batch_shape = _get_shape(sp_a)[:-2]
# Reshape the SparseTensor into a rank 3 SparseTensors, with the
# batch shape flattened to a single dimension. If the batch rank is 0, then
# we add a batch dimension of rank 1.
sp_a = tf.sparse.reshape(sp_a, tf.concat([[-1], _get_shape(sp_a)[-2:]],
axis=0))
# Reshape b to stack the batch dimension along the rows.
b = tf.reshape(b, tf.concat([[-1], _get_shape(b)[-1:]], axis=0))
# Convert the SparseTensor to a matrix in block diagonal form with blocks of
  # matrices [M, N]. This allows us to use tf.sparse_tensor_dense_matmul which
# only accepts rank 2 (Sparse)Tensors.
out = tf.sparse.sparse_dense_matmul(_sparse_block_diag(sp_a), b, **kwargs)
# Finally retrieve the original batch shape from the resulting rank 2 Tensor.
# Note that we avoid inferring the final shape from `sp_a` or `b` because we
# might have transposed one or both of them.
return tf.reshape(
out,
tf.concat([batch_shape, [-1], _get_shape(out)[-1:]], axis=0)) |
def Convert(self, metadata, checkresult, token=None):
"""Converts a single CheckResult.
Args:
metadata: ExportedMetadata to be used for conversion.
checkresult: CheckResult to be converted.
token: Security token.
Yields:
Resulting ExportedCheckResult. Empty list is a valid result and means that
conversion wasn't possible.
"""
if checkresult.HasField("anomaly"):
for anomaly in checkresult.anomaly:
exported_anomaly = ExportedAnomaly(
type=anomaly.type,
severity=anomaly.severity,
confidence=anomaly.confidence)
if anomaly.symptom:
exported_anomaly.symptom = anomaly.symptom
if anomaly.explanation:
exported_anomaly.explanation = anomaly.explanation
if anomaly.generated_by:
exported_anomaly.generated_by = anomaly.generated_by
if anomaly.anomaly_reference_id:
exported_anomaly.anomaly_reference_id = "\n".join(
anomaly.anomaly_reference_id)
if anomaly.finding:
exported_anomaly.finding = "\n".join(anomaly.finding)
yield ExportedCheckResult(
metadata=metadata,
check_id=checkresult.check_id,
anomaly=exported_anomaly)
else:
yield ExportedCheckResult(
metadata=metadata, check_id=checkresult.check_id) | Converts a single CheckResult.
Args:
metadata: ExportedMetadata to be used for conversion.
checkresult: CheckResult to be converted.
token: Security token.
Yields:
Resulting ExportedCheckResult. Empty list is a valid result and means that
conversion wasn't possible. | Below is the the instruction that describes the task:
### Input:
Converts a single CheckResult.
Args:
metadata: ExportedMetadata to be used for conversion.
checkresult: CheckResult to be converted.
token: Security token.
Yields:
Resulting ExportedCheckResult. Empty list is a valid result and means that
conversion wasn't possible.
### Response:
def Convert(self, metadata, checkresult, token=None):
"""Converts a single CheckResult.
Args:
metadata: ExportedMetadata to be used for conversion.
checkresult: CheckResult to be converted.
token: Security token.
Yields:
Resulting ExportedCheckResult. Empty list is a valid result and means that
conversion wasn't possible.
"""
if checkresult.HasField("anomaly"):
for anomaly in checkresult.anomaly:
exported_anomaly = ExportedAnomaly(
type=anomaly.type,
severity=anomaly.severity,
confidence=anomaly.confidence)
if anomaly.symptom:
exported_anomaly.symptom = anomaly.symptom
if anomaly.explanation:
exported_anomaly.explanation = anomaly.explanation
if anomaly.generated_by:
exported_anomaly.generated_by = anomaly.generated_by
if anomaly.anomaly_reference_id:
exported_anomaly.anomaly_reference_id = "\n".join(
anomaly.anomaly_reference_id)
if anomaly.finding:
exported_anomaly.finding = "\n".join(anomaly.finding)
yield ExportedCheckResult(
metadata=metadata,
check_id=checkresult.check_id,
anomaly=exported_anomaly)
else:
yield ExportedCheckResult(
metadata=metadata, check_id=checkresult.check_id) |
def get_query(self):
"""
Default filters for model
"""
return (
super().get_query()
.filter(or_(models.DagModel.is_active,
models.DagModel.is_paused))
.filter(~models.DagModel.is_subdag)
) | Default filters for model | Below is the the instruction that describes the task:
### Input:
Default filters for model
### Response:
def get_query(self):
"""
Default filters for model
"""
return (
super().get_query()
.filter(or_(models.DagModel.is_active,
models.DagModel.is_paused))
.filter(~models.DagModel.is_subdag)
) |
def _set_request_auth_type_metric(self, request):
"""
Add metric 'request_auth_type' for the authentication type used.
NOTE: This is a best guess at this point. Possible values include:
no-user
unauthenticated
jwt/bearer/other-token-type
session-or-unknown (catch all)
"""
if 'HTTP_AUTHORIZATION' in request.META and request.META['HTTP_AUTHORIZATION']:
token_parts = request.META['HTTP_AUTHORIZATION'].split()
# Example: "JWT eyJhbGciO..."
if len(token_parts) == 2:
auth_type = token_parts[0].lower() # 'jwt' or 'bearer' (for example)
else:
auth_type = 'other-token-type'
elif not hasattr(request, 'user') or not request.user:
auth_type = 'no-user'
elif not request.user.is_authenticated:
auth_type = 'unauthenticated'
else:
auth_type = 'session-or-unknown'
monitoring.set_custom_metric('request_auth_type', auth_type) | Add metric 'request_auth_type' for the authentication type used.
NOTE: This is a best guess at this point. Possible values include:
no-user
unauthenticated
jwt/bearer/other-token-type
session-or-unknown (catch all) | Below is the the instruction that describes the task:
### Input:
Add metric 'request_auth_type' for the authentication type used.
NOTE: This is a best guess at this point. Possible values include:
no-user
unauthenticated
jwt/bearer/other-token-type
session-or-unknown (catch all)
### Response:
def _set_request_auth_type_metric(self, request):
"""
Add metric 'request_auth_type' for the authentication type used.
NOTE: This is a best guess at this point. Possible values include:
no-user
unauthenticated
jwt/bearer/other-token-type
session-or-unknown (catch all)
"""
if 'HTTP_AUTHORIZATION' in request.META and request.META['HTTP_AUTHORIZATION']:
token_parts = request.META['HTTP_AUTHORIZATION'].split()
# Example: "JWT eyJhbGciO..."
if len(token_parts) == 2:
auth_type = token_parts[0].lower() # 'jwt' or 'bearer' (for example)
else:
auth_type = 'other-token-type'
elif not hasattr(request, 'user') or not request.user:
auth_type = 'no-user'
elif not request.user.is_authenticated:
auth_type = 'unauthenticated'
else:
auth_type = 'session-or-unknown'
monitoring.set_custom_metric('request_auth_type', auth_type) |
def read_value(self, offset=0):
"""
Reads the value of this descriptor.
When successful, the value will be returned, otherwise `descriptor_read_value_failed()` of the related
device is invoked.
"""
try:
val = self._object.ReadValue(
{'offset': dbus.UInt16(offset, variant_level=1)},
dbus_interface='org.bluez.GattDescriptor1')
return val
except dbus.exceptions.DBusException as e:
error = _error_from_dbus_error(e)
self.service.device.descriptor_read_value_failed(self, error=error) | Reads the value of this descriptor.
When successful, the value will be returned, otherwise `descriptor_read_value_failed()` of the related
device is invoked. | Below is the the instruction that describes the task:
### Input:
Reads the value of this descriptor.
When successful, the value will be returned, otherwise `descriptor_read_value_failed()` of the related
device is invoked.
### Response:
def read_value(self, offset=0):
"""
Reads the value of this descriptor.
When successful, the value will be returned, otherwise `descriptor_read_value_failed()` of the related
device is invoked.
"""
try:
val = self._object.ReadValue(
{'offset': dbus.UInt16(offset, variant_level=1)},
dbus_interface='org.bluez.GattDescriptor1')
return val
except dbus.exceptions.DBusException as e:
error = _error_from_dbus_error(e)
self.service.device.descriptor_read_value_failed(self, error=error) |
def recipient(self):
"""
:returns: A :class:`~okcupyd.profile.Profile` instance belonging
to the recipient of this message.
"""
return (self._message_thread.correspondent_profile
if 'from_me' in self._message_element.attrib['class']
else self._message_thread.user_profile) | :returns: A :class:`~okcupyd.profile.Profile` instance belonging
to the recipient of this message. | Below is the the instruction that describes the task:
### Input:
:returns: A :class:`~okcupyd.profile.Profile` instance belonging
to the recipient of this message.
### Response:
def recipient(self):
"""
:returns: A :class:`~okcupyd.profile.Profile` instance belonging
to the recipient of this message.
"""
return (self._message_thread.correspondent_profile
if 'from_me' in self._message_element.attrib['class']
else self._message_thread.user_profile) |
def p_notificationTypeClause(self, p):
"""notificationTypeClause : fuzzy_lowercase_identifier NOTIFICATION_TYPE NotificationObjectsPart STATUS Status DESCRIPTION Text ReferPart COLON_COLON_EQUAL '{' NotificationName '}'""" # some MIBs have uppercase and/or lowercase id
p[0] = ('notificationTypeClause', p[1], # id
# p[2], # NOTIFICATION_TYPE
p[3], # NotificationObjectsPart
p[5], # status
(p[6], p[7]), # description
p[8], # Reference
p[11]) | notificationTypeClause : fuzzy_lowercase_identifier NOTIFICATION_TYPE NotificationObjectsPart STATUS Status DESCRIPTION Text ReferPart COLON_COLON_EQUAL '{' NotificationName '} | Below is the the instruction that describes the task:
### Input:
notificationTypeClause : fuzzy_lowercase_identifier NOTIFICATION_TYPE NotificationObjectsPart STATUS Status DESCRIPTION Text ReferPart COLON_COLON_EQUAL '{' NotificationName '}
### Response:
def p_notificationTypeClause(self, p):
"""notificationTypeClause : fuzzy_lowercase_identifier NOTIFICATION_TYPE NotificationObjectsPart STATUS Status DESCRIPTION Text ReferPart COLON_COLON_EQUAL '{' NotificationName '}'""" # some MIBs have uppercase and/or lowercase id
p[0] = ('notificationTypeClause', p[1], # id
# p[2], # NOTIFICATION_TYPE
p[3], # NotificationObjectsPart
p[5], # status
(p[6], p[7]), # description
p[8], # Reference
p[11]) |
def poweroff_server(self, server=None, server_id=None):
"""
        Poweroff a VM. It is possible to pass either the VM object or simply the ID
        of the VM that we want to power off.
Args:
server: VM Object that represent the VM to power off,
server_id: Int or Str representing the ID of the VM to power off.
Returns:
            return True if json_obj['Success'] == 'True' else False
"""
sid = server_id if server_id is not None else server.sid
if sid is None:
raise Exception('No Server Specified.')
json_scheme = self.gen_def_json_scheme('SetEnqueueServerPowerOff', dict(ServerId=sid))
json_obj = self.call_method_post('SetEnqueueServerPowerOff', json_scheme=json_scheme)
        return True if json_obj['Success'] == 'True' else False | Poweroff a VM. It is possible to pass either the VM object or simply the ID
        of the VM that we want to power off.
Args:
server: VM Object that represent the VM to power off,
server_id: Int or Str representing the ID of the VM to power off.
Returns:
            return True if json_obj['Success'] == 'True' else False | Below is the the instruction that describes the task:
### Input:
Poweroff a VM. It is possible to pass either the VM object or simply the ID
of the VM that we want to power off.
Args:
server: VM Object that represent the VM to power off,
server_id: Int or Str representing the ID of the VM to power off.
Returns:
    return True if json_obj['Success'] == 'True' else False
### Response:
def poweroff_server(self, server=None, server_id=None):
"""
    Poweroff a VM. It is possible to pass either the VM object or simply the ID
    of the VM that we want to power off.
Args:
server: VM Object that represent the VM to power off,
server_id: Int or Str representing the ID of the VM to power off.
Returns:
        return True if json_obj['Success'] == 'True' else False
"""
sid = server_id if server_id is not None else server.sid
if sid is None:
raise Exception('No Server Specified.')
json_scheme = self.gen_def_json_scheme('SetEnqueueServerPowerOff', dict(ServerId=sid))
json_obj = self.call_method_post('SetEnqueueServerPowerOff', json_scheme=json_scheme)
    return True if json_obj['Success'] == 'True' else False |
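A sketch of a call against the cloud client that owns this method; the client object and server ID are assumptions.

# Queue a power-off either by numeric ID or with a previously fetched VM object.
ok = cloud.poweroff_server(server_id=42)
print("power-off queued" if ok else "request failed")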
def create_binary_descriptor(descriptor):
"""Convert a string node descriptor into a 20-byte binary descriptor.
This is the inverse operation of parse_binary_descriptor and composing
the two operations is a noop.
Args:
descriptor (str): A string node descriptor
Returns:
bytes: A 20-byte binary node descriptor.
"""
func_names = {0: 'copy_latest_a', 1: 'average_a',
2: 'copy_all_a', 3: 'sum_a',
4: 'copy_count_a', 5: 'trigger_streamer',
6: 'call_rpc', 7: 'subtract_afromb'}
func_codes = {y: x for x, y in func_names.items()}
node, inputs, processing = parse_node_descriptor(descriptor, DeviceModel())
func_code = func_codes.get(processing)
if func_code is None:
raise ArgumentError("Unknown processing function", function=processing)
stream_a, trigger_a = inputs[0]
stream_a = stream_a.encode()
if len(inputs) == 2:
stream_b, trigger_b = inputs[1]
stream_b = stream_b.encode()
else:
stream_b, trigger_b = 0xFFFF, None
if trigger_a is None:
trigger_a = TrueTrigger()
if trigger_b is None:
trigger_b = TrueTrigger()
ref_a = 0
if isinstance(trigger_a, InputTrigger):
ref_a = trigger_a.reference
ref_b = 0
if isinstance(trigger_b, InputTrigger):
ref_b = trigger_b.reference
trigger_a = _create_binary_trigger(trigger_a)
trigger_b = _create_binary_trigger(trigger_b)
combiner = node.trigger_combiner
bin_desc = struct.pack("<LLHHHBBBB2x", ref_a, ref_b, node.stream.encode(), stream_a, stream_b, func_code, trigger_a, trigger_b, combiner)
return bin_desc | Convert a string node descriptor into a 20-byte binary descriptor.
This is the inverse operation of parse_binary_descriptor and composing
the two operations is a noop.
Args:
descriptor (str): A string node descriptor
Returns:
bytes: A 20-byte binary node descriptor. | Below is the the instruction that describes the task:
### Input:
Convert a string node descriptor into a 20-byte binary descriptor.
This is the inverse operation of parse_binary_descriptor and composing
the two operations is a noop.
Args:
descriptor (str): A string node descriptor
Returns:
bytes: A 20-byte binary node descriptor.
### Response:
def create_binary_descriptor(descriptor):
"""Convert a string node descriptor into a 20-byte binary descriptor.
This is the inverse operation of parse_binary_descriptor and composing
the two operations is a noop.
Args:
descriptor (str): A string node descriptor
Returns:
bytes: A 20-byte binary node descriptor.
"""
func_names = {0: 'copy_latest_a', 1: 'average_a',
2: 'copy_all_a', 3: 'sum_a',
4: 'copy_count_a', 5: 'trigger_streamer',
6: 'call_rpc', 7: 'subtract_afromb'}
func_codes = {y: x for x, y in func_names.items()}
node, inputs, processing = parse_node_descriptor(descriptor, DeviceModel())
func_code = func_codes.get(processing)
if func_code is None:
raise ArgumentError("Unknown processing function", function=processing)
stream_a, trigger_a = inputs[0]
stream_a = stream_a.encode()
if len(inputs) == 2:
stream_b, trigger_b = inputs[1]
stream_b = stream_b.encode()
else:
stream_b, trigger_b = 0xFFFF, None
if trigger_a is None:
trigger_a = TrueTrigger()
if trigger_b is None:
trigger_b = TrueTrigger()
ref_a = 0
if isinstance(trigger_a, InputTrigger):
ref_a = trigger_a.reference
ref_b = 0
if isinstance(trigger_b, InputTrigger):
ref_b = trigger_b.reference
trigger_a = _create_binary_trigger(trigger_a)
trigger_b = _create_binary_trigger(trigger_b)
combiner = node.trigger_combiner
bin_desc = struct.pack("<LLHHHBBBB2x", ref_a, ref_b, node.stream.encode(), stream_a, stream_b, func_code, trigger_a, trigger_b, combiner)
return bin_desc |
def get_tournament(self, tag: crtag, timeout=0):
"""Get a tournament information
Parameters
----------
tag: str
A valid tournament tag. Minimum length: 3
Valid characters: 0289PYLQGRJCUV
\*\*timeout: Optional[int] = None
Custom timeout that overwrites Client.timeout
"""
url = self.api.TOURNAMENT + '/' + tag
        return self._get_model(url, PartialTournament, timeout=timeout) | Get information about a tournament
Parameters
----------
tag: str
A valid tournament tag. Minimum length: 3
Valid characters: 0289PYLQGRJCUV
\*\*timeout: Optional[int] = None
Custom timeout that overwrites Client.timeout | Below is the the instruction that describes the task:
### Input:
Get information about a tournament
Parameters
----------
tag: str
A valid tournament tag. Minimum length: 3
Valid characters: 0289PYLQGRJCUV
\*\*timeout: Optional[int] = None
Custom timeout that overwrites Client.timeout
### Response:
def get_tournament(self, tag: crtag, timeout=0):
"""Get a tournament information
Parameters
----------
tag: str
A valid tournament tag. Minimum length: 3
Valid characters: 0289PYLQGRJCUV
\*\*timeout: Optional[int] = None
Custom timeout that overwrites Client.timeout
"""
url = self.api.TOURNAMENT + '/' + tag
return self._get_model(url, PartialTournament, timeout=timeout) |
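A hedged sketch against a clashroyale-style client that exposes this method; the client constructor, token, tag, and result attribute names are assumptions.

client = Client(token="YOUR_API_TOKEN")        # assumed client class
tournament = client.get_tournament("2PP0")     # placeholder tournament tag
print(tournament.name, tournament.capacity)    # attribute names are assumptions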