code (stringlengths 75-104k) | docstring (stringlengths 1-46.9k) | text (stringlengths 164-112k) |
---|---|---|
def post(request):
"""Creates a tag object
:param name: Name for tag
:type name: str
:returns: json
"""
res = Result()
data = request.POST or json.loads(request.body)['body']
name = data.get('name', None)
if not name:
res.isError = True
res.message = "No name given"
return JsonResponse(res.asDict())
tag = Tag.objects.get_or_create(name=name.lower())[0]
res.append(tag.json())
return JsonResponse(res.asDict()) | Creates a tag object
:param name: Name for tag
:type name: str
:returns: json | Below is the instruction that describes the task:
### Input:
Creates a tag object
:param name: Name for tag
:type name: str
:returns: json
### Response:
def post(request):
"""Creates a tag object
:param name: Name for tag
:type name: str
:returns: json
"""
res = Result()
data = request.POST or json.loads(request.body)['body']
name = data.get('name', None)
if not name:
res.isError = True
res.message = "No name given"
return JsonResponse(res.asDict())
tag = Tag.objects.get_or_create(name=name.lower())[0]
res.append(tag.json())
return JsonResponse(res.asDict()) |
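A minimal usage sketch for the `post` view above, assuming a configured Django project where `post`, `Result` and `Tag` are importable; the URL path and tag name are made up.

```python
# Usage sketch (assumptions noted above): build a fake POST request and call the view.
import json
from django.test import RequestFactory

factory = RequestFactory()
request = factory.post('/tags/', data={'name': 'Landscape'})  # hypothetical URL and name
response = post(request)                     # JsonResponse from the view above
print(json.loads(response.content))          # Result dict containing the new tag
```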
def serialize(self, queryset, **options):
"""
Serialize a queryset.
"""
self.options = options
self.stream = options.get("stream", StringIO())
self.primary_key = options.get("primary_key", None)
self.properties = options.get("properties")
self.geometry_field = options.get("geometry_field", "geom")
self.use_natural_keys = options.get("use_natural_keys", False)
self.bbox = options.get("bbox", None)
self.bbox_auto = options.get("bbox_auto", None)
self.srid = options.get("srid", GEOJSON_DEFAULT_SRID)
self.crs = options.get("crs", True)
self.start_serialization()
if ValuesQuerySet is not None and isinstance(queryset, ValuesQuerySet):
self.serialize_values_queryset(queryset)
elif isinstance(queryset, list):
self.serialize_object_list(queryset)
elif isinstance(queryset, QuerySet):
self.serialize_queryset(queryset)
self.end_serialization()
return self.getvalue() | Serialize a queryset. | Below is the instruction that describes the task:
### Input:
Serialize a queryset.
### Response:
def serialize(self, queryset, **options):
"""
Serialize a queryset.
"""
self.options = options
self.stream = options.get("stream", StringIO())
self.primary_key = options.get("primary_key", None)
self.properties = options.get("properties")
self.geometry_field = options.get("geometry_field", "geom")
self.use_natural_keys = options.get("use_natural_keys", False)
self.bbox = options.get("bbox", None)
self.bbox_auto = options.get("bbox_auto", None)
self.srid = options.get("srid", GEOJSON_DEFAULT_SRID)
self.crs = options.get("crs", True)
self.start_serialization()
if ValuesQuerySet is not None and isinstance(queryset, ValuesQuerySet):
self.serialize_values_queryset(queryset)
elif isinstance(queryset, list):
self.serialize_object_list(queryset)
elif isinstance(queryset, QuerySet):
self.serialize_queryset(queryset)
self.end_serialization()
return self.getvalue() |
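A minimal usage sketch for `serialize`, assuming it belongs to a django-geojson-style `Serializer` class; `MyModel` and its fields are hypothetical.

```python
# Usage sketch: serialize a queryset of geo-enabled objects to GeoJSON.
serializer = Serializer()                    # assumed surrounding class
geojson = serializer.serialize(
    MyModel.objects.all(),                   # hypothetical model with a 'geom' field
    properties=('name', 'category'),         # attributes copied into each feature
    geometry_field='geom',
    srid=4326,
)
print(geojson[:200])
```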
def make_plot(
self, count, plot=None, show=False, plottype='probability',
bar=dict(alpha=0.15, color='b', linewidth=1.0, edgecolor='b'),
errorbar=dict(fmt='b.'),
gaussian=dict(ls='--', c='r')
):
""" Convert histogram counts in array ``count`` into a plot.
Args:
count (array): Array of histogram counts (see
:meth:`PDFHistogram.count`).
plot (plotter): :mod:`matplotlib` plotting window. If ``None``
uses the default window. Default is ``None``.
show (boolean): Displays the plot if ``True``; otherwise returns
the plot. Default is ``False``.
plottype (str): The probabilities in each bin are plotted if
``plottype='probability'`` (default). The average probability
density is plotted if ``plottype='density'``. The
cumulative probability is plotted if ``plottype='cumulative'``.
bar (dictionary): Additional plotting arguments for the bar graph
showing the histogram. This part of the plot is omitted
if ``bar=None``.
errorbar (dictionary): Additional plotting arguments for the
errorbar graph, showing error bars on the histogram. This
part of the plot is omitted if ``errorbar=None``.
gaussian (dictionary): Additional plotting arguments for the
plot of the Gaussian probability for the |GVar| (``g``)
specified in the initialization. This part of the plot
is omitted if ``gaussian=None`` or if no ``g`` was
specified.
"""
if numpy.ndim(count) != 1:
raise ValueError('count must have dimension 1')
if plot is None:
import matplotlib.pyplot as plot
if len(count) == len(self.midpoints) + 2:
norm = numpy.sum(count)
data = numpy.asarray(count[1:-1]) / norm
elif len(count) != len(self.midpoints):
raise ValueError(
'wrong data length: %s != %s'
% (len(count), len(self.midpoints))
)
else:
data = numpy.asarray(count)
if plottype == 'cumulative':
data = numpy.cumsum(data)
data = numpy.array([0.] + data.tolist())
data_sdev = sdev(data)
if not numpy.all(data_sdev == 0.0):
data_mean = mean(data)
plot.errorbar(self.bins, data_mean, data_sdev, **errorbar)
if bar is not None:
plot.fill_between(self.bins, 0, data_mean, **bar)
# mean, +- 1 sigma lines
plot.plot([self.bins[0], self.bins[-1]], [0.5, 0.5], 'k:')
plot.plot([self.bins[0], self.bins[-1]], [0.158655254, 0.158655254], 'k:')
plot.plot([self.bins[0], self.bins[-1]], [0.841344746, 0.841344746], 'k:')
else:
if plottype == 'density':
data = data / self.widths
if errorbar is not None:
data_sdev = sdev(data)
if not numpy.all(data_sdev == 0.0):
data_mean = mean(data)
plot.errorbar(self.midpoints, data_mean, data_sdev, **errorbar)
if bar is not None:
plot.bar(self.bins[:-1], mean(data), width=self.widths, align='edge', **bar)
if gaussian is not None and self.g is not None:
# spline goes through the errorbar points for gaussian stats
if plottype == 'cumulative':
x = numpy.array(self.bins.tolist() + self.midpoints.tolist())
x.sort()
dx = (x - self.g.mean) / self.g.sdev
y = (erf(dx / 2**0.5) + 1) / 2.
yspline = cspline.CSpline(x, y)
plot.ylabel('cumulative probability')
plot.ylim(0, 1.0)
elif plottype in ['density', 'probability']:
x = self.bins
dx = (x - self.g.mean) / self.g.sdev
y = (erf(dx / 2**0.5) + 1) / 2.
x = self.midpoints
y = (y[1:] - y[:-1])
if plottype == 'density':
y /= self.widths
plot.ylabel('probability density')
else:
plot.ylabel('probability')
yspline = cspline.CSpline(x, y)
else:
raise ValueError('unknown plottype: ' + str(plottype))
if len(x) < 100:
ny = int(100. / len(x) + 0.5) * len(x)
else:
ny = len(x)
xplot = numpy.linspace(x[0], x[-1], ny)
plot.plot(xplot, yspline(xplot), **gaussian)
if show:
plot.show()
return plot | Convert histogram counts in array ``count`` into a plot.
Args:
count (array): Array of histogram counts (see
:meth:`PDFHistogram.count`).
plot (plotter): :mod:`matplotlib` plotting window. If ``None``
uses the default window. Default is ``None``.
show (boolean): Displays the plot if ``True``; otherwise returns
the plot. Default is ``False``.
plottype (str): The probabilities in each bin are plotted if
``plottype='probability'`` (default). The average probability
density is plotted if ``plottype='density'``. The
cumulative probability is plotted if ``plottype='cumulative'``.
bar (dictionary): Additional plotting arguments for the bar graph
showing the histogram. This part of the plot is omitted
if ``bar=None``.
errorbar (dictionary): Additional plotting arguments for the
errorbar graph, showing error bars on the histogram. This
part of the plot is omitted if ``errorbar=None``.
gaussian (dictionary): Additional plotting arguments for the
plot of the Gaussian probability for the |GVar| (``g``)
specified in the initialization. This part of the plot
is omitted if ``gaussian=None`` or if no ``g`` was
specified. | Below is the instruction that describes the task:
### Input:
Convert histogram counts in array ``count`` into a plot.
Args:
count (array): Array of histogram counts (see
:meth:`PDFHistogram.count`).
plot (plotter): :mod:`matplotlib` plotting window. If ``None``
uses the default window. Default is ``None``.
show (boolean): Displays the plot if ``True``; otherwise returns
the plot. Default is ``False``.
plottype (str): The probabilities in each bin are plotted if
``plottype='probability'`` (default). The average probability
density is plotted if ``plottype='density'``. The
cumulative probability is plotted if ``plottype='cumulative'``.
bar (dictionary): Additional plotting arguments for the bar graph
showing the histogram. This part of the plot is omitted
if ``bar=None``.
errorbar (dictionary): Additional plotting arguments for the
errorbar graph, showing error bars on the histogram. This
part of the plot is omitted if ``errorbar=None``.
gaussian (dictionary): Additional plotting arguments for the
plot of the Gaussian probability for the |GVar| (``g``)
specified in the initialization. This part of the plot
is omitted if ``gaussian=None`` or if no ``g`` was
specified.
### Response:
def make_plot(
self, count, plot=None, show=False, plottype='probability',
bar=dict(alpha=0.15, color='b', linewidth=1.0, edgecolor='b'),
errorbar=dict(fmt='b.'),
gaussian=dict(ls='--', c='r')
):
""" Convert histogram counts in array ``count`` into a plot.
Args:
count (array): Array of histogram counts (see
:meth:`PDFHistogram.count`).
plot (plotter): :mod:`matplotlib` plotting window. If ``None``
uses the default window. Default is ``None``.
show (boolean): Displays the plot if ``True``; otherwise returns
the plot. Default is ``False``.
plottype (str): The probabilities in each bin are plotted if
``plottype='probability'`` (default). The average probability
density is plotted if ``plottype='density'``. The
cumulative probability is plotted if ``plottype='cumulative'``.
bar (dictionary): Additional plotting arguments for the bar graph
showing the histogram. This part of the plot is omitted
if ``bar=None``.
errorbar (dictionary): Additional plotting arguments for the
errorbar graph, showing error bars on the histogram. This
part of the plot is omitted if ``errorbar=None``.
gaussian (dictionary): Additional plotting arguments for the
plot of the Gaussian probability for the |GVar| (``g``)
specified in the initialization. This part of the plot
is omitted if ``gaussian=None`` or if no ``g`` was
specified.
"""
if numpy.ndim(count) != 1:
raise ValueError('count must have dimension 1')
if plot is None:
import matplotlib.pyplot as plot
if len(count) == len(self.midpoints) + 2:
norm = numpy.sum(count)
data = numpy.asarray(count[1:-1]) / norm
elif len(count) != len(self.midpoints):
raise ValueError(
'wrong data length: %s != %s'
% (len(count), len(self.midpoints))
)
else:
data = numpy.asarray(count)
if plottype == 'cumulative':
data = numpy.cumsum(data)
data = numpy.array([0.] + data.tolist())
data_sdev = sdev(data)
if not numpy.all(data_sdev == 0.0):
data_mean = mean(data)
plot.errorbar(self.bins, data_mean, data_sdev, **errorbar)
if bar is not None:
plot.fill_between(self.bins, 0, data_mean, **bar)
# mean, +- 1 sigma lines
plot.plot([self.bins[0], self.bins[-1]], [0.5, 0.5], 'k:')
plot.plot([self.bins[0], self.bins[-1]], [0.158655254, 0.158655254], 'k:')
plot.plot([self.bins[0], self.bins[-1]], [0.841344746, 0.841344746], 'k:')
else:
if plottype == 'density':
data = data / self.widths
if errorbar is not None:
data_sdev = sdev(data)
if not numpy.all(data_sdev == 0.0):
data_mean = mean(data)
plot.errorbar(self.midpoints, data_mean, data_sdev, **errorbar)
if bar is not None:
plot.bar(self.bins[:-1], mean(data), width=self.widths, align='edge', **bar)
if gaussian is not None and self.g is not None:
# spline goes through the errorbar points for gaussian stats
if plottype == 'cumulative':
x = numpy.array(self.bins.tolist() + self.midpoints.tolist())
x.sort()
dx = (x - self.g.mean) / self.g.sdev
y = (erf(dx / 2**0.5) + 1) / 2.
yspline = cspline.CSpline(x, y)
plot.ylabel('cumulative probability')
plot.ylim(0, 1.0)
elif plottype in ['density', 'probability']:
x = self.bins
dx = (x - self.g.mean) / self.g.sdev
y = (erf(dx / 2**0.5) + 1) / 2.
x = self.midpoints
y = (y[1:] - y[:-1])
if plottype == 'density':
y /= self.widths
plot.ylabel('probability density')
else:
plot.ylabel('probability')
yspline = cspline.CSpline(x, y)
else:
raise ValueError('unknown plottype: ' + str(plottype))
if len(x) < 100:
ny = int(100. / len(x) + 0.5) * len(x)
else:
ny = len(x)
xplot = numpy.linspace(x[0], x[-1], ny)
plot.plot(xplot, yspline(xplot), **gaussian)
if show:
plot.show()
return plot |
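A minimal usage sketch for `make_plot`, assuming the surrounding class is `gvar.PDFHistogram` (the docstring refers to `PDFHistogram.count`); details of the host package may differ.

```python
# Usage sketch: histogram Monte Carlo samples of a Gaussian and plot them.
import numpy as np
import gvar                                  # assumed host package

g = gvar.gvar(0.0, 1.0)                      # the |GVar| given at initialization
hist = gvar.PDFHistogram(g)                  # assumed owner of count/make_plot
samples = np.random.normal(0.0, 1.0, 10000)
count = hist.count(samples)                  # histogram counts for make_plot
hist.make_plot(count, plottype='probability', show=True)
```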
def create_all(graph):
"""
Create all database tables.
"""
head = get_current_head(graph)
if head is None:
Model.metadata.create_all(graph.postgres)
stamp_head(graph) | Create all database tables. | Below is the instruction that describes the task:
### Input:
Create all database tables.
### Response:
def create_all(graph):
"""
Create all database tables.
"""
head = get_current_head(graph)
if head is None:
Model.metadata.create_all(graph.postgres)
stamp_head(graph) |
def get_method_map(self, viewset, method_map):
"""
Given a viewset, and a mapping of http methods to actions, return a new mapping which only
includes any mappings that are actually implemented by the viewset.
"""
bound_methods = {}
for method, action in method_map.items():
if hasattr(viewset, action):
bound_methods[method] = action
return bound_methods | Given a viewset, and a mapping of http methods to actions, return a new mapping which only
includes any mappings that are actually implemented by the viewset. | Below is the instruction that describes the task:
### Input:
Given a viewset, and a mapping of http methods to actions, return a new mapping which only
includes any mappings that are actually implemented by the viewset.
### Response:
def get_method_map(self, viewset, method_map):
"""
Given a viewset, and a mapping of http methods to actions, return a new mapping which only
includes any mappings that are actually implemented by the viewset.
"""
bound_methods = {}
for method, action in method_map.items():
if hasattr(viewset, action):
bound_methods[method] = action
return bound_methods |
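A minimal usage sketch for `get_method_map`, assuming it lives on a DRF-style router such as `rest_framework.routers.SimpleRouter`; the viewset is hypothetical.

```python
# Usage sketch: only actions the viewset actually implements are kept.
from rest_framework.routers import SimpleRouter   # assumed host of get_method_map

class ArticleViewSet:
    def list(self, request): ...
    def create(self, request): ...
    # no 'destroy' action defined

method_map = {'get': 'list', 'post': 'create', 'delete': 'destroy'}
print(SimpleRouter().get_method_map(ArticleViewSet, method_map))
# expected: {'get': 'list', 'post': 'create'}
```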
def logger_initial_config(service_name=None,
log_level=None,
logger_format=None,
logger_date_format=None):
'''Set initial logging configurations.
:param service_name: Name of the service
:type logger: String
:param log_level: A string or integer corresponding to a Python logging level
:type log_level: String
:param logger_format: A string defining the format of the logs
:type log_level: String
:param logger_date_format: A string defining the format of the date/time in the logs
:type log_level: String
:rtype: None
'''
if not log_level:
log_level = os.getenv('LOGGING_LEVEL', 'DEBUG')
if not logger_format:
logger_format = (
"%(asctime)s.%(msecs)06dZ|"
"%(levelname)s: {}: %(message)s"
).format(service_name)
if not logger_date_format:
logger_date_format = os.getenv('LOGGING_DATE_FORMAT', "%Y-%m-%dT%H:%M:%S")
logging.basicConfig(level=log_level,
format=logger_format,
datefmt=logger_date_format) | Set initial logging configurations.
:param service_name: Name of the service
:type logger: String
:param log_level: A string or integer corresponding to a Python logging level
:type log_level: String
:param logger_format: A string defining the format of the logs
:type log_level: String
:param logger_date_format: A string defining the format of the date/time in the logs
:type log_level: String
:rtype: None | Below is the instruction that describes the task:
### Input:
Set initial logging configurations.
:param service_name: Name of the service
:type logger: String
:param log_level: A string or integer corresponding to a Python logging level
:type log_level: String
:param logger_format: A string defining the format of the logs
:type log_level: String
:param logger_date_format: A string defining the format of the date/time in the logs
:type log_level: String
:rtype: None
### Response:
def logger_initial_config(service_name=None,
log_level=None,
logger_format=None,
logger_date_format=None):
'''Set initial logging configurations.
:param service_name: Name of the service
:type logger: String
:param log_level: A string or integer corresponding to a Python logging level
:type log_level: String
:param logger_format: A string defining the format of the logs
:type log_level: String
:param logger_date_format: A string defining the format of the date/time in the logs
:type log_level: String
:rtype: None
'''
if not log_level:
log_level = os.getenv('LOGGING_LEVEL', 'DEBUG')
if not logger_format:
logger_format = (
"%(asctime)s.%(msecs)06dZ|"
"%(levelname)s: {}: %(message)s"
).format(service_name)
if not logger_date_format:
logger_date_format = os.getenv('LOGGING_DATE_FORMAT', "%Y-%m-%dT%H:%M:%S")
logging.basicConfig(level=log_level,
format=logger_format,
datefmt=logger_date_format) |
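A minimal usage sketch for `logger_initial_config`; the service name is made up and the log line in the comment is approximate.

```python
# Usage sketch: configure root logging for a service, then emit a message.
import logging

logger_initial_config(service_name='my-service', log_level='INFO')
logging.getLogger(__name__).info('service started')
# approximate output:
# 2024-01-01T12:00:00.000123Z|INFO: my-service: service started
```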
def render_subject(self, context):
"""
Renders the message subject for the given context.
The context data is automatically unescaped to avoid rendering HTML
entities in ``text/plain`` content.
:param context: The context to use when rendering the subject template.
:type context: :class:`~django.template.Context`
:returns: A rendered subject.
:rtype: :class:`str`
"""
rendered = self.subject_template.render(unescape(context))
return rendered.strip() | Renders the message subject for the given context.
The context data is automatically unescaped to avoid rendering HTML
entities in ``text/plain`` content.
:param context: The context to use when rendering the subject template.
:type context: :class:`~django.template.Context`
:returns: A rendered subject.
:rtype: :class:`str` | Below is the instruction that describes the task:
### Input:
Renders the message subject for the given context.
The context data is automatically unescaped to avoid rendering HTML
entities in ``text/plain`` content.
:param context: The context to use when rendering the subject template.
:type context: :class:`~django.template.Context`
:returns: A rendered subject.
:rtype: :class:`str`
### Response:
def render_subject(self, context):
"""
Renders the message subject for the given context.
The context data is automatically unescaped to avoid rendering HTML
entities in ``text/plain`` content.
:param context: The context to use when rendering the subject template.
:type context: :class:`~django.template.Context`
:returns: A rendered subject.
:rtype: :class:`str`
"""
rendered = self.subject_template.render(unescape(context))
return rendered.strip() |
def hazard_at_times(self, times, label=None):
"""
Return a Pandas series of the predicted hazard at specific times.
Parameters
-----------
times: iterable or float
values to return the hazard at.
label: string, optional
Rename the series returned. Useful for plotting.
Returns
--------
pd.Series
"""
label = coalesce(label, self._label)
return pd.Series(self._hazard(self._fitted_parameters_, times), index=_to_array(times), name=label) | Return a Pandas series of the predicted hazard at specific times.
Parameters
-----------
times: iterable or float
values to return the hazard at.
label: string, optional
Rename the series returned. Useful for plotting.
Returns
--------
pd.Series | Below is the instruction that describes the task:
### Input:
Return a Pandas series of the predicted hazard at specific times.
Parameters
-----------
times: iterable or float
values to return the hazard at.
label: string, optional
Rename the series returned. Useful for plotting.
Returns
--------
pd.Series
### Response:
def hazard_at_times(self, times, label=None):
"""
Return a Pandas series of the predicted hazard at specific times.
Parameters
-----------
times: iterable or float
values to return the hazard at.
label: string, optional
Rename the series returned. Useful for plotting.
Returns
--------
pd.Series
"""
label = coalesce(label, self._label)
return pd.Series(self._hazard(self._fitted_parameters_, times), index=_to_array(times), name=label) |
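A minimal usage sketch for `hazard_at_times`, assuming the method belongs to a lifelines-style parametric fitter such as `WeibullFitter`; the data are made up.

```python
# Usage sketch: fit a parametric survival model, then query its hazard.
import numpy as np
from lifelines import WeibullFitter          # assumed host library

durations = np.array([5., 6., 6., 7., 8., 10., 12., 15.])
observed = np.array([1, 1, 0, 1, 1, 0, 1, 1])

wf = WeibullFitter().fit(durations, observed, label='weibull hazard')
print(wf.hazard_at_times([1.0, 5.0, 10.0]))  # pd.Series indexed by the times
```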
def handle_delete_scan_command(self, scan_et):
""" Handles <delete_scan> command.
@return: Response string for <delete_scan> command.
"""
scan_id = scan_et.attrib.get('scan_id')
if scan_id is None:
return simple_response_str('delete_scan', 404,
'No scan_id attribute')
if not self.scan_exists(scan_id):
text = "Failed to find scan '{0}'".format(scan_id)
return simple_response_str('delete_scan', 404, text)
self.check_scan_process(scan_id)
if self.delete_scan(scan_id):
return simple_response_str('delete_scan', 200, 'OK')
raise OSPDError('Scan in progress', 'delete_scan') | Handles <delete_scan> command.
@return: Response string for <delete_scan> command. | Below is the instruction that describes the task:
### Input:
Handles <delete_scan> command.
@return: Response string for <delete_scan> command.
### Response:
def handle_delete_scan_command(self, scan_et):
""" Handles <delete_scan> command.
@return: Response string for <delete_scan> command.
"""
scan_id = scan_et.attrib.get('scan_id')
if scan_id is None:
return simple_response_str('delete_scan', 404,
'No scan_id attribute')
if not self.scan_exists(scan_id):
text = "Failed to find scan '{0}'".format(scan_id)
return simple_response_str('delete_scan', 404, text)
self.check_scan_process(scan_id)
if self.delete_scan(scan_id):
return simple_response_str('delete_scan', 200, 'OK')
raise OSPDError('Scan in progress', 'delete_scan') |
def _process_sasl_failure(self, stream, element):
"""Process incoming <sasl:failure/> element.
[initiating entity only]
"""
_unused = stream
if not self.authenticator:
logger.debug("Unexpected SASL response")
return False
logger.debug("SASL authentication failed: {0!r}".format(
element_to_unicode(element)))
raise SASLAuthenticationFailed("SASL authentication failed") | Process incoming <sasl:failure/> element.
[initiating entity only] | Below is the instruction that describes the task:
### Input:
Process incoming <sasl:failure/> element.
[initiating entity only]
### Response:
def _process_sasl_failure(self, stream, element):
"""Process incoming <sasl:failure/> element.
[initiating entity only]
"""
_unused = stream
if not self.authenticator:
logger.debug("Unexpected SASL response")
return False
logger.debug("SASL authentication failed: {0!r}".format(
element_to_unicode(element)))
raise SASLAuthenticationFailed("SASL authentication failed") |
def run():
"""This client generates customer reports on all the samples in workbench."""
# Grab server args
args = client_helper.grab_server_args()
# Start up workbench connection
workbench = zerorpc.Client(timeout=300, heartbeat=60)
workbench.connect('tcp://'+args['server']+':'+args['port'])
all_set = workbench.generate_sample_set()
results = workbench.set_work_request('view_customer', all_set)
for customer in results:
print customer['customer'] | This client generates customer reports on all the samples in workbench. | Below is the instruction that describes the task:
### Input:
This client generates customer reports on all the samples in workbench.
### Response:
def run():
"""This client generates customer reports on all the samples in workbench."""
# Grab server args
args = client_helper.grab_server_args()
# Start up workbench connection
workbench = zerorpc.Client(timeout=300, heartbeat=60)
workbench.connect('tcp://'+args['server']+':'+args['port'])
all_set = workbench.generate_sample_set()
results = workbench.set_work_request('view_customer', all_set)
for customer in results:
print customer['customer'] |
def jsonify(symbol):
""" returns json format for symbol """
try:
# all symbols have a toJson method, try it
return json.dumps(symbol.toJson(), indent=' ')
except AttributeError:
pass
return json.dumps(symbol, indent=' ') | returns json format for symbol | Below is the instruction that describes the task:
### Input:
returns json format for symbol
### Response:
def jsonify(symbol):
""" returns json format for symbol """
try:
# all symbols have a toJson method, try it
return json.dumps(symbol.toJson(), indent=' ')
except AttributeError:
pass
return json.dumps(symbol, indent=' ') |
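A minimal usage sketch for `jsonify`, assuming the surrounding module has `import json`; the `Symbol` class is hypothetical.

```python
# Usage sketch: objects exposing toJson() are serialized through it,
# everything else is dumped directly with json.dumps.
class Symbol:
    def toJson(self):
        return {'name': 'pressure', 'unit': 'Pa'}

print(jsonify(Symbol()))         # serialized via toJson()
print(jsonify({'plain': 1}))     # no toJson(): falls back to json.dumps
```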
def parse_names_and_default(self):
"""parse for `parse_content`
{title: [('-a, --all=STH', 'default'), ...]}"""
result = {}
for title, text in self.formal_content.items():
if not text:
result[title] = []
continue
logger.debug('\n' + text)
collect = []
to_list = text.splitlines()
# parse first line. Should NEVER fail.
# this will ensure in `[default: xxx]`,
# the `xxx`(e.g: `\t`, `,`) will not be changed by _format_line
previous_line = to_list.pop(0)
collect.append(self.parse_line_option_indent(previous_line))
for line in to_list:
indent_match = self.indent_re.match(line)
this_indent = len(indent_match.groupdict()['indent'])
if this_indent >= collect[-1]['indent']:
# A multi line description
previous_line = line
continue
# new option line
# deal the default for previous option
collect[-1]['default'] = self.parse_default(previous_line)
# deal this option
collect.append(self.parse_line_option_indent(line))
logger.debug(collect[-1])
previous_line = line
else:
collect[-1]['default'] = self.parse_default(previous_line)
result[title] = [
(each['option'], each['default']) for each in collect]
return result | parse for `parse_content`
{title: [('-a, --all=STH', 'default'), ...]} | Below is the instruction that describes the task:
### Input:
parse for `parse_content`
{title: [('-a, --all=STH', 'default'), ...]}
### Response:
def parse_names_and_default(self):
"""parse for `parse_content`
{title: [('-a, --all=STH', 'default'), ...]}"""
result = {}
for title, text in self.formal_content.items():
if not text:
result[title] = []
continue
logger.debug('\n' + text)
collect = []
to_list = text.splitlines()
# parse first line. Should NEVER fail.
# this will ensure in `[default: xxx]`,
# the `xxx`(e.g: `\t`, `,`) will not be changed by _format_line
previous_line = to_list.pop(0)
collect.append(self.parse_line_option_indent(previous_line))
for line in to_list:
indent_match = self.indent_re.match(line)
this_indent = len(indent_match.groupdict()['indent'])
if this_indent >= collect[-1]['indent']:
# A multi line description
previous_line = line
continue
# new option line
# deal the default for previous option
collect[-1]['default'] = self.parse_default(previous_line)
# deal this option
collect.append(self.parse_line_option_indent(line))
logger.debug(collect[-1])
previous_line = line
else:
collect[-1]['default'] = self.parse_default(previous_line)
result[title] = [
(each['option'], each['default']) for each in collect]
return result |
def read(self, cmd_args):
"""
Execute Vagrant read command.
:param list cmd_args:
Command argument list.
"""
args = [
"vagrant",
"--machine-readable"
]
args.extend(cmd_args)
proc = subprocess.Popen(args, stdout=subprocess.PIPE)
for line in proc.stdout.readlines():
if len(line) == 0:
break
yield line.decode("UTF-8").rsplit(",")
proc.wait() | Execute Vagrant read command.
:param list cmd_args:
Command argument list. | Below is the instruction that describes the task:
### Input:
Execute Vagrant read command.
:param list cmd_args:
Command argument list.
### Response:
def read(self, cmd_args):
"""
Execute Vagrant read command.
:param list cmd_args:
Command argument list.
"""
args = [
"vagrant",
"--machine-readable"
]
args.extend(cmd_args)
proc = subprocess.Popen(args, stdout=subprocess.PIPE)
for line in proc.stdout.readlines():
if len(line) == 0:
break
yield line.decode("UTF-8").rsplit(",")
proc.wait() |
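A minimal usage sketch for `read`, assuming the surrounding class is a `Vagrant` wrapper and a local Vagrant installation is available.

```python
# Usage sketch: stream machine-readable `vagrant status` output.
wrapper = Vagrant()                          # assumed owner of read()
for parts in wrapper.read(['status']):
    # each item is one machine-readable line split on commas, e.g.
    # ['1614442000', 'default', 'state', 'running\n']
    print(parts)
```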
def remove_watcher(self, issue, watcher):
"""Remove a user from an issue's watch list.
:param issue: ID or key of the issue affected
:param watcher: username of the user to remove from the watchers list
:rtype: Response
"""
url = self._get_url('issue/' + str(issue) + '/watchers')
params = {'username': watcher}
result = self._session.delete(url, params=params)
return result | Remove a user from an issue's watch list.
:param issue: ID or key of the issue affected
:param watcher: username of the user to remove from the watchers list
:rtype: Response | Below is the instruction that describes the task:
### Input:
Remove a user from an issue's watch list.
:param issue: ID or key of the issue affected
:param watcher: username of the user to remove from the watchers list
:rtype: Response
### Response:
def remove_watcher(self, issue, watcher):
"""Remove a user from an issue's watch list.
:param issue: ID or key of the issue affected
:param watcher: username of the user to remove from the watchers list
:rtype: Response
"""
url = self._get_url('issue/' + str(issue) + '/watchers')
params = {'username': watcher}
result = self._session.delete(url, params=params)
return result |
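A minimal usage sketch for `remove_watcher` with the python `jira` client; server, credentials, issue key and username are made up.

```python
# Usage sketch: drop a watcher from an issue.
from jira import JIRA                        # assumed host library

client = JIRA(server='https://jira.example.com', basic_auth=('me', 'secret'))
resp = client.remove_watcher('PROJ-123', 'some.username')
print(resp.status_code)                      # typically 204 on success
```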
def info(self, remote_path):
"""Gets information about resource on WebDAV.
More information can be found at http://webdav.org/specs/rfc4918.html#METHOD_PROPFIND
:param remote_path: the path to the remote resource.
:return: a dictionary of information attributes and their values with the following keys:
`created`: date of resource creation,
`name`: name of resource,
`size`: size of resource,
`modified`: date of resource modification.
"""
urn = Urn(remote_path)
if not self.check(urn.path()) and not self.check(Urn(remote_path, directory=True).path()):
raise RemoteResourceNotFound(remote_path)
response = self.execute_request(action='info', path=urn.quote())
path = self.get_full_path(urn)
return WebDavXmlUtils.parse_info_response(content=response.content, path=path, hostname=self.webdav.hostname) | Gets information about resource on WebDAV.
More information can be found at http://webdav.org/specs/rfc4918.html#METHOD_PROPFIND
:param remote_path: the path to the remote resource.
:return: a dictionary of information attributes and their values with the following keys:
`created`: date of resource creation,
`name`: name of resource,
`size`: size of resource,
`modified`: date of resource modification. | Below is the instruction that describes the task:
### Input:
Gets information about resource on WebDAV.
More information can be found at http://webdav.org/specs/rfc4918.html#METHOD_PROPFIND
:param remote_path: the path to the remote resource.
:return: a dictionary of information attributes and their values with the following keys:
`created`: date of resource creation,
`name`: name of resource,
`size`: size of resource,
`modified`: date of resource modification.
### Response:
def info(self, remote_path):
"""Gets information about resource on WebDAV.
More information can be found at http://webdav.org/specs/rfc4918.html#METHOD_PROPFIND
:param remote_path: the path to the remote resource.
:return: a dictionary of information attributes and their values with the following keys:
`created`: date of resource creation,
`name`: name of resource,
`size`: size of resource,
`modified`: date of resource modification.
"""
urn = Urn(remote_path)
if not self.check(urn.path()) and not self.check(Urn(remote_path, directory=True).path()):
raise RemoteResourceNotFound(remote_path)
response = self.execute_request(action='info', path=urn.quote())
path = self.get_full_path(urn)
return WebDavXmlUtils.parse_info_response(content=response.content, path=path, hostname=self.webdav.hostname) |
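A minimal usage sketch for `info`, assuming the webdavclient3-style `Client` owns this method; hostname, credentials and path are made up.

```python
# Usage sketch: fetch metadata for a remote WebDAV resource.
from webdav3.client import Client            # assumed host package

options = {
    'webdav_hostname': 'https://webdav.example.com',
    'webdav_login': 'user',
    'webdav_password': 'secret',
}
client = Client(options)
print(client.info('reports/2024/summary.txt'))
# e.g. {'created': ..., 'name': 'summary.txt', 'size': ..., 'modified': ...}
```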
def add_unique_element(self, location, element):
"""
Create an entry located at ``location``.
Args:
location: String or :class:`LocationDescriptor` to describe a "separator location" (i.e. dir1/dir2/dir3 for
instance).
element: Element to store.
Returns:
The created node with the element.
Notes:
The different sub locations entries **must** exist and the last one **MUST NOT** already exist.
Use the more loose :meth:`add_element` method if needed.
"""
return self._create_entry(location, element, unique=True) | Create an entry located at ``location``.
Args:
location: String or :class:`LocationDescriptor` to describe a "separator location" (i.e. dir1/dir2/dir3 for
instance).
element: Element to store.
Returns:
The created node with the element.
Notes:
The different sub locations entries **must** exist and the last one **MUST NOT** already exist.
Use the more loose :meth:`add_element` method if needed. | Below is the instruction that describes the task:
### Input:
Create an entry located at ``location``.
Args:
location: String or :class:`LocationDescriptor` to describe a "separator location" (i.e. dir1/dir2/dir3 for
instance).
element: Element to store.
Returns:
The created node with the element.
Notes:
The different sub locations entries **must** exist and the last one **MUST NOT** already exist.
Use the more loose :meth:`add_element` method if needed.
### Response:
def add_unique_element(self, location, element):
"""
Create an entry located at ``location``.
Args:
location: String or :class:`LocationDescriptor` to describe a "separator location" (i.e. dir1/dir2/dir3 for
instance).
element: Element to store.
Returns:
The created node with the element.
Notes:
The different sub locations entries **must** exist and the last one **MUST NOT** already exist.
Use the more loose :meth:`add_element` method if needed.
"""
return self._create_entry(location, element, unique=True) |
def maybe_stream(s):
"""Ensure that the given argument is a stream."""
if isinstance(s, Stream):
return s
if s is None:
stream = InMemStream()
stream.close() # we don't intend to write anything
return stream
if isinstance(s, unicode):
s = s.encode('utf-8')
if isinstance(s, bytearray):
s = bytes(s)
if isinstance(s, bytes):
stream = InMemStream(s)
stream.close() # we don't intend to write anything
return stream
# s may still conform to the Stream interface. Yay duck typing.
return s | Ensure that the given argument is a stream. | Below is the the instruction that describes the task:
### Input:
Ensure that the given argument is a stream.
### Response:
def maybe_stream(s):
"""Ensure that the given argument is a stream."""
if isinstance(s, Stream):
return s
if s is None:
stream = InMemStream()
stream.close() # we don't intend to write anything
return stream
if isinstance(s, unicode):
s = s.encode('utf-8')
if isinstance(s, bytearray):
s = bytes(s)
if isinstance(s, bytes):
stream = InMemStream(s)
stream.close() # we don't intend to write anything
return stream
# s may still conform to the Stream interface. Yay duck typing.
return s |
def with_index(self, new_index):
"""
Returns a TimeSeriesRDD rebased on top of a new index. Any timestamps that exist in the new
index but not in the existing index will be filled in with NaNs.
Parameters
----------
new_index : DateTimeIndex
"""
return TimeSeriesRDD(None, None, self._jtsrdd.withIndex(new_index._jdt_index), self.ctx) | Returns a TimeSeriesRDD rebased on top of a new index. Any timestamps that exist in the new
index but not in the existing index will be filled in with NaNs.
Parameters
----------
new_index : DateTimeIndex | Below is the instruction that describes the task:
### Input:
Returns a TimeSeriesRDD rebased on top of a new index. Any timestamps that exist in the new
index but not in the existing index will be filled in with NaNs.
Parameters
----------
new_index : DateTimeIndex
### Response:
def with_index(self, new_index):
"""
Returns a TimeSeriesRDD rebased on top of a new index. Any timestamps that exist in the new
index but not in the existing index will be filled in with NaNs.
Parameters
----------
new_index : DateTimeIndex
"""
return TimeSeriesRDD(None, None, self._jtsrdd.withIndex(new_index._jdt_index), self.ctx) |
def get_env(env_file='.env'):
"""
Set default environment variables from .env file
"""
try:
with open(env_file) as f:
for line in f.readlines():
try:
key, val = line.split('=', maxsplit=1)
os.environ.setdefault(key.strip(), val.strip())
except ValueError:
pass
except FileNotFoundError:
pass | Set default environment variables from .env file | Below is the instruction that describes the task:
### Input:
Set default environment variables from .env file
### Response:
def get_env(env_file='.env'):
"""
Set default environment variables from .env file
"""
try:
with open(env_file) as f:
for line in f.readlines():
try:
key, val = line.split('=', maxsplit=1)
os.environ.setdefault(key.strip(), val.strip())
except ValueError:
pass
except FileNotFoundError:
pass |
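A minimal usage sketch for `get_env`; the `.env` contents in the comment are hypothetical.

```python
# Usage sketch: given a .env file containing, say,
#   DATABASE_URL=postgres://localhost/app
# defaults are loaded into os.environ without overriding existing values.
import os

get_env('.env')
print(os.environ.get('DATABASE_URL'))
```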
def validate(cert, ca_name, crl_file):
'''
.. versionadded:: Neon
Validate a certificate against a given CA/CRL.
cert
path to the certificate PEM file or string
ca_name
name of the CA
crl_file
full path to the CRL file
'''
store = OpenSSL.crypto.X509Store()
cert_obj = _read_cert(cert)
if cert_obj is None:
raise CommandExecutionError(
'Failed to read cert from {0}, see log for details'.format(cert)
)
ca_dir = '{0}/{1}'.format(cert_base_path(), ca_name)
ca_cert = _read_cert('{0}/{1}_ca_cert.crt'.format(ca_dir, ca_name))
store.add_cert(ca_cert)
# These flags tell OpenSSL to check the leaf as well as the
# entire cert chain.
X509StoreFlags = OpenSSL.crypto.X509StoreFlags
store.set_flags(X509StoreFlags.CRL_CHECK | X509StoreFlags.CRL_CHECK_ALL)
if crl_file is None:
crl = OpenSSL.crypto.CRL()
else:
with salt.utils.files.fopen(crl_file) as fhr:
crl = OpenSSL.crypto.load_crl(OpenSSL.crypto.FILETYPE_PEM, fhr.read())
store.add_crl(crl)
context = OpenSSL.crypto.X509StoreContext(store, cert_obj)
ret = {}
try:
context.verify_certificate()
ret['valid'] = True
except OpenSSL.crypto.X509StoreContextError as e:
ret['error'] = str(e)
ret['error_cert'] = e.certificate
ret['valid'] = False
return ret | .. versionadded:: Neon
Validate a certificate against a given CA/CRL.
cert
path to the certificate PEM file or string
ca_name
name of the CA
crl_file
full path to the CRL file | Below is the instruction that describes the task:
### Input:
.. versionadded:: Neon
Validate a certificate against a given CA/CRL.
cert
path to the certificate PEM file or string
ca_name
name of the CA
crl_file
full path to the CRL file
### Response:
def validate(cert, ca_name, crl_file):
'''
.. versionadded:: Neon
Validate a certificate against a given CA/CRL.
cert
path to the certificate PEM file or string
ca_name
name of the CA
crl_file
full path to the CRL file
'''
store = OpenSSL.crypto.X509Store()
cert_obj = _read_cert(cert)
if cert_obj is None:
raise CommandExecutionError(
'Failed to read cert from {0}, see log for details'.format(cert)
)
ca_dir = '{0}/{1}'.format(cert_base_path(), ca_name)
ca_cert = _read_cert('{0}/{1}_ca_cert.crt'.format(ca_dir, ca_name))
store.add_cert(ca_cert)
# These flags tell OpenSSL to check the leaf as well as the
# entire cert chain.
X509StoreFlags = OpenSSL.crypto.X509StoreFlags
store.set_flags(X509StoreFlags.CRL_CHECK | X509StoreFlags.CRL_CHECK_ALL)
if crl_file is None:
crl = OpenSSL.crypto.CRL()
else:
with salt.utils.files.fopen(crl_file) as fhr:
crl = OpenSSL.crypto.load_crl(OpenSSL.crypto.FILETYPE_PEM, fhr.read())
store.add_crl(crl)
context = OpenSSL.crypto.X509StoreContext(store, cert_obj)
ret = {}
try:
context.verify_certificate()
ret['valid'] = True
except OpenSSL.crypto.X509StoreContextError as e:
ret['error'] = str(e)
ret['error_cert'] = e.certificate
ret['valid'] = False
return ret |
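A minimal usage sketch for `validate` (the code suggests a Salt execution module); the CA name and paths are made up.

```python
# Usage sketch: check a certificate against a CA and its CRL.
ret = validate(
    cert='/etc/pki/issued_certs/www.example.com.crt',   # made-up path
    ca_name='my_little_ca',                              # made-up CA name
    crl_file='/etc/pki/my_little_ca/crl.pem',            # made-up path
)
print(ret['valid'], ret.get('error'))
```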
def AgregarUbicacionTambo(self, latitud, longitud, domicilio,
cod_localidad, cod_provincia, codigo_postal,
nombre_partido_depto, **kwargs):
"Add the producer's data to the settlement (liquidación)."
ubic_tambo = {'latitud': latitud,
'longitud': longitud,
'domicilio': domicilio,
'codLocalidad': cod_localidad,
'codProvincia': cod_provincia,
'nombrePartidoDepto': nombre_partido_depto,
'codigoPostal': codigo_postal}
self.solicitud['tambo']['ubicacionTambo'] = ubic_tambo
return True | Add the producer's data to the settlement (liquidación). | Below is the instruction that describes the task:
### Input:
Add the producer's data to the settlement (liquidación).
### Response:
def AgregarUbicacionTambo(self, latitud, longitud, domicilio,
cod_localidad, cod_provincia, codigo_postal,
nombre_partido_depto, **kwargs):
"Add the producer's data to the settlement (liquidación)."
ubic_tambo = {'latitud': latitud,
'longitud': longitud,
'domicilio': domicilio,
'codLocalidad': cod_localidad,
'codProvincia': cod_provincia,
'nombrePartidoDepto': nombre_partido_depto,
'codigoPostal': codigo_postal}
self.solicitud['tambo']['ubicacionTambo'] = ubic_tambo
return True |
def operator_oropt(self, graph, solution, op_diff_round_digits, anim=None):
# TODO: check docstring
"""Applies Or-Opt intra-route operator to solution
Takes chains of nodes (length=3..1 consecutive nodes) from a given
route and calculates savings when inserted into another position on the
same route (all possible positions). Performs the best move (max. saving)
and starts over again with the new route until no improvement is found.
Args
----
graph: :networkx:`NetworkX Graph Obj< >`
A NetworkX graph is used.
solution: BaseSolution
BaseSolution instance
op_diff_round_digits: float
Precision (floating point digits) for rounding route length differences.
*Details*: In some cases when an exchange is performed on two routes with one node each,
the difference between the both solutions (before and after the exchange) is not zero.
This is due to internal rounding errors of float type. So the loop won't break
(alternating between these two solutions), we need an additional criterion to avoid
this behaviour: A threshold to handle values very close to zero as if they were zero
(for a more detailed description of the matter see http://floating-point-gui.de or
https://docs.python.org/3.5/tutorial/floatingpoint.html)
anim: AnimationDing0
AnimationDing0 object
Returns
-------
LocalSearchSolution
A solution (LocalSearchSolution class)
Notes
-----
Since Or-Opt is an intra-route operator, it does not have to be checked whether the route can allocate (Route's method
can_allocate()) nodes during relocation regarding max. peak load/current because the line/cable type is the
same along the entire route. However, node order within a route has an impact on the voltage stability
so the check would be actually required. Due to large line capacity (load factor of lines/cables ~60 %)
the voltage stability issues are neglected.
(Inner) Loop variables:
* s: length (count of consecutive nodes) of the chain that is moved. Values: 3..1
* i: node that precedes the chain before moving (position in the route `tour`, not node name)
* j: node that precedes the chain after moving (position in the route `tour`, not node name)
Todo
----
* insert literature reference for Or-algorithm here
* Remove ugly nested loops, convert to more efficient matrix operations
"""
no_ctr = 100
# shorter var names for loop
dm = graph._matrix
dn = graph._nodes
for route in solution.routes():
# exclude routes with single high-demand nodes (Load Areas)
if len(route._nodes) == 1:
if solution._problem._is_aggregated[str(route._nodes[0])]:
continue
n = len(route._nodes)+1
# create tour by adding depot at start and end
tour = [graph._depot] + route._nodes + [graph._depot]
# Or-Opt: Search better solutions by checking possible chain moves
while True:
length = route.length()
length_best = length
for s in range(3,0,-1):
for i in range(1,n-s):
length_diff = (length -
dm[dn[tour[i-1].name()]][dn[tour[i].name()]] -
dm[dn[tour[i+s-1].name()]][dn[tour[i+s].name()]] +
dm[dn[tour[i-1].name()]][dn[tour[i+s].name()]])
for j in range(i+s+1,n+1):
if j == n:
j2 = 1
else:
j2 = j+1
length_new = (length_diff +
dm[dn[tour[j-1].name()]][dn[tour[i].name()]] +
dm[dn[tour[i+s-1].name()]][dn[tour[j2-1].name()]] -
dm[dn[tour[j-1].name()]][dn[tour[j2-1].name()]])
if length_new < length_best:
length_best = length_new
s_best, i_best, j_best = s, i, j
if length_best < length:
tour = tour[0:i_best] + tour[i_best+s_best:j_best] + tour[i_best:i_best+s_best] + tour[j_best:n+1]
if anim is not None:
solution.draw_network(anim)
# no improvement found
if length_best == length:
# replace old route by new (same arg for allocation and deallocation since node order is considered at allocation)
solution._routes[solution._routes.index(route)].deallocate(tour[1:-1])
solution._routes[solution._routes.index(route)].allocate(tour[1:-1])
break
#solution = LocalSearchSolution(solution, graph, new_routes)
return solution | Applies Or-Opt intra-route operator to solution
Takes chains of nodes (length=3..1 consecutive nodes) from a given
route and calculates savings when inserted into another position on the
same route (all possible positions). Performs the best move (max. saving)
and starts over again with the new route until no improvement is found.
Args
----
graph: :networkx:`NetworkX Graph Obj< >`
A NetworkX graph is used.
solution: BaseSolution
BaseSolution instance
op_diff_round_digits: float
Precision (floating point digits) for rounding route length differences.
*Details*: In some cases when an exchange is performed on two routes with one node each,
the difference between the both solutions (before and after the exchange) is not zero.
This is due to internal rounding errors of float type. So the loop won't break
(alternating between these two solutions), we need an additional criterion to avoid
this behaviour: A threshold to handle values very close to zero as if they were zero
(for a more detailed description of the matter see http://floating-point-gui.de or
https://docs.python.org/3.5/tutorial/floatingpoint.html)
anim: AnimationDing0
AnimationDing0 object
Returns
-------
LocalSearchSolution
A solution (LocalSearchSolution class)
Notes
-----
Since Or-Opt is an intra-route operator, it does not have to be checked whether the route can allocate (Route's method
can_allocate()) nodes during relocation regarding max. peak load/current because the line/cable type is the
same along the entire route. However, node order within a route has an impact on the voltage stability
so the check would be actually required. Due to large line capacity (load factor of lines/cables ~60 %)
the voltage stability issues are neglected.
(Inner) Loop variables:
* s: length (count of consecutive nodes) of the chain that is moved. Values: 3..1
* i: node that precedes the chain before moving (position in the route `tour`, not node name)
* j: node that precedes the chain after moving (position in the route `tour`, not node name)
Todo
----
* insert literature reference for Or-algorithm here
* Remove ugly nested loops, convert to more efficient matrix operations | Below is the instruction that describes the task:
### Input:
Applies Or-Opt intra-route operator to solution
Takes chains of nodes (length=3..1 consecutive nodes) from a given
route and calculates savings when inserted into another position on the
same route (all possible positions). Performs the best move (max. saving)
and starts over again with the new route until no improvement is found.
Args
----
graph: :networkx:`NetworkX Graph Obj< >`
A NetworkX graph is used.
solution: BaseSolution
BaseSolution instance
op_diff_round_digits: float
Precision (floating point digits) for rounding route length differences.
*Details*: In some cases when an exchange is performed on two routes with one node each,
the difference between the both solutions (before and after the exchange) is not zero.
This is due to internal rounding errors of float type. So the loop won't break
(alternating between these two solutions), we need an additional criterion to avoid
this behaviour: A threshold to handle values very close to zero as if they were zero
(for a more detailed description of the matter see http://floating-point-gui.de or
https://docs.python.org/3.5/tutorial/floatingpoint.html)
anim: AnimationDing0
AnimationDing0 object
Returns
-------
LocalSearchSolution
A solution (LocalSearchSolution class)
Notes
-----
Since Or-Opt is an intra-route operator, it does not have to be checked whether the route can allocate (Route's method
can_allocate()) nodes during relocation regarding max. peak load/current because the line/cable type is the
same along the entire route. However, node order within a route has an impact on the voltage stability
so the check would be actually required. Due to large line capacity (load factor of lines/cables ~60 %)
the voltage stability issues are neglected.
(Inner) Loop variables:
* s: length (count of consecutive nodes) of the chain that is moved. Values: 3..1
* i: node that precedes the chain before moving (position in the route `tour`, not node name)
* j: node that precedes the chain after moving (position in the route `tour`, not node name)
Todo
----
* insert literature reference for Or-algorithm here
* Remove ugly nested loops, convert to more efficient matrix operations
### Response:
def operator_oropt(self, graph, solution, op_diff_round_digits, anim=None):
# TODO: check docstring
"""Applies Or-Opt intra-route operator to solution
Takes chains of nodes (length=3..1 consecutive nodes) from a given
route and calculates savings when inserted into another position on the
same route (all possible positions). Performs the best move (max. saving)
and starts over again with the new route until no improvement is found.
Args
----
graph: :networkx:`NetworkX Graph Obj< >`
A NetworkX graph is used.
solution: BaseSolution
BaseSolution instance
op_diff_round_digits: float
Precision (floating point digits) for rounding route length differences.
*Details*: In some cases when an exchange is performed on two routes with one node each,
the difference between the both solutions (before and after the exchange) is not zero.
This is due to internal rounding errors of float type. So the loop won't break
(alternating between these two solutions), we need an additional criterion to avoid
this behaviour: A threshold to handle values very close to zero as if they were zero
(for a more detailed description of the matter see http://floating-point-gui.de or
https://docs.python.org/3.5/tutorial/floatingpoint.html)
anim: AnimationDing0
AnimationDing0 object
Returns
-------
LocalSearchSolution
A solution (LocalSearchSolution class)
Notes
-----
Since Or-Opt is an intra-route operator, it does not have to be checked whether the route can allocate (Route's method
can_allocate()) nodes during relocation regarding max. peak load/current because the line/cable type is the
same along the entire route. However, node order within a route has an impact on the voltage stability
so the check would be actually required. Due to large line capacity (load factor of lines/cables ~60 %)
the voltage stability issues are neglected.
(Inner) Loop variables:
* s: length (count of consecutive nodes) of the chain that is moved. Values: 3..1
* i: node that precedes the chain before moving (position in the route `tour`, not node name)
* j: node that precedes the chain after moving (position in the route `tour`, not node name)
Todo
----
* insert literature reference for Or-algorithm here
* Remove ugly nested loops, convert to more efficient matrix operations
"""
no_ctr = 100
# shorter var names for loop
dm = graph._matrix
dn = graph._nodes
for route in solution.routes():
# exclude routes with single high-demand nodes (Load Areas)
if len(route._nodes) == 1:
if solution._problem._is_aggregated[str(route._nodes[0])]:
continue
n = len(route._nodes)+1
# create tour by adding depot at start and end
tour = [graph._depot] + route._nodes + [graph._depot]
# Or-Opt: Search better solutions by checking possible chain moves
while True:
length = route.length()
length_best = length
for s in range(3,0,-1):
for i in range(1,n-s):
length_diff = (length -
dm[dn[tour[i-1].name()]][dn[tour[i].name()]] -
dm[dn[tour[i+s-1].name()]][dn[tour[i+s].name()]] +
dm[dn[tour[i-1].name()]][dn[tour[i+s].name()]])
for j in range(i+s+1,n+1):
if j == n:
j2 = 1
else:
j2 = j+1
length_new = (length_diff +
dm[dn[tour[j-1].name()]][dn[tour[i].name()]] +
dm[dn[tour[i+s-1].name()]][dn[tour[j2-1].name()]] -
dm[dn[tour[j-1].name()]][dn[tour[j2-1].name()]])
if length_new < length_best:
length_best = length_new
s_best, i_best, j_best = s, i, j
if length_best < length:
tour = tour[0:i_best] + tour[i_best+s_best:j_best] + tour[i_best:i_best+s_best] + tour[j_best:n+1]
if anim is not None:
solution.draw_network(anim)
# no improvement found
if length_best == length:
# replace old route by new (same arg for allocation and deallocation since node order is considered at allocation)
solution._routes[solution._routes.index(route)].deallocate(tour[1:-1])
solution._routes[solution._routes.index(route)].allocate(tour[1:-1])
break
#solution = LocalSearchSolution(solution, graph, new_routes)
return solution |
def get_attribute_from_indices(self, indices: list, attribute_name: str):
"""Get attribute values for the requested indices.
:param indices: Indices of vertices for which the attribute values are requested.
:param attribute_name: The name of the attribute.
:return: A list of attribute values for the requested indices.
"""
return list(np.array(self.graph.vs[attribute_name])[indices]) | Get attribute values for the requested indices.
:param indices: Indices of vertices for which the attribute values are requested.
:param attribute_name: The name of the attribute.
:return: A list of attribute values for the requested indices. | Below is the instruction that describes the task:
### Input:
Get attribute values for the requested indices.
:param indices: Indices of vertices for which the attribute values are requested.
:param attribute_name: The name of the attribute.
:return: A list of attribute values for the requested indices.
### Response:
def get_attribute_from_indices(self, indices: list, attribute_name: str):
"""Get attribute values for the requested indices.
:param indices: Indices of vertices for which the attribute values are requested.
:param attribute_name: The name of the attribute.
:return: A list of attribute values for the requested indices.
"""
return list(np.array(self.graph.vs[attribute_name])[indices]) |
def QA_fetch_risk(message={}, params={"_id": 0, 'assets': 0, 'timeindex': 0, 'totaltimeindex': 0, 'benchmark_assets': 0, 'month_profit': 0}, db=DATABASE):
"""get the risk message
Arguments:
query_mes {[type]} -- [description]
Keyword Arguments:
collection {[type]} -- [description] (default: {DATABASE})
Returns:
[type] -- [description]
"""
collection = DATABASE.risk
return [res for res in collection.find(message, params)] | get the risk message
Arguments:
query_mes {[type]} -- [description]
Keyword Arguments:
collection {[type]} -- [description] (default: {DATABASE})
Returns:
[type] -- [description] | Below is the instruction that describes the task:
### Input:
get the risk message
Arguments:
query_mes {[type]} -- [description]
Keyword Arguments:
collection {[type]} -- [description] (default: {DATABASE})
Returns:
[type] -- [description]
### Response:
def QA_fetch_risk(message={}, params={"_id": 0, 'assets': 0, 'timeindex': 0, 'totaltimeindex': 0, 'benchmark_assets': 0, 'month_profit': 0}, db=DATABASE):
"""get the risk message
Arguments:
query_mes {[type]} -- [description]
Keyword Arguments:
collection {[type]} -- [description] (default: {DATABASE})
Returns:
[type] -- [description]
"""
collection = DATABASE.risk
return [res for res in collection.find(message, params)] |
def from_histogram(cls, histogram, bin_edges, axis_names=None):
"""Make a HistdD from numpy histogram + bin edges
:param histogram: Initial histogram
:param bin_edges: x bin edges of histogram, y bin edges, ...
:return: Histnd instance
"""
bin_edges = np.array(bin_edges)
self = cls(bins=bin_edges, axis_names=axis_names)
self.histogram = histogram
return self | Make a HistdD from numpy histogram + bin edges
:param histogram: Initial histogram
:param bin_edges: x bin edges of histogram, y bin edges, ...
:return: Histnd instance | Below is the instruction that describes the task:
### Input:
Make a HistdD from numpy histogram + bin edges
:param histogram: Initial histogram
:param bin_edges: x bin edges of histogram, y bin edges, ...
:return: Histnd instance
### Response:
def from_histogram(cls, histogram, bin_edges, axis_names=None):
"""Make a HistdD from numpy histogram + bin edges
:param histogram: Initial histogram
:param bin_edges: x bin edges of histogram, y bin edges, ...
:return: Histnd instance
"""
bin_edges = np.array(bin_edges)
self = cls(bins=bin_edges, axis_names=axis_names)
self.histogram = histogram
return self |
def asset_create_combo(self, name, combo, tag='', description=''):
'''asset_create_combo name, combination, tag, description
Creates a new combination asset list. Operands can be either asset list
IDs or be a nested combination asset list.
UN-DOCUMENTED CALL: This function is not considered stable.
AND = intersection
OR = union
operand = asset list ID or nested combination.
operator = intersection or union.
Example:
combo = {
'operand1': {
'operand1': '2',
'operand2': '2',
'operation': 'union',
},
'operand2': '3',
'operation': 'intersection'
}
:param name: Name of the asset list.
:type name: string
:param combo: dict
:param tag: The tag of the asset list.
:type tag: string
:param description: Description of the asset list.
:type description: string
'''
return self.raw_query('asset', 'add', data={
'name': name,
'description': description,
'type': 'combination',
'combinations': combo,
}) | asset_create_combo name, combination, tag, description
Creates a new combination asset list. Operands can be either asset list
IDs or be a nested combination asset list.
UN-DOCUMENTED CALL: This function is not considered stable.
AND = intersection
OR = union
operand = asset list ID or nested combination.
operator = intersection or union.
Example:
combo = {
'operand1': {
'operand1': '2',
'operand2': '2',
'operation': 'union',
},
'operand2': '3',
'operation': 'intersection'
}
:param name: Name of the asset list.
:type name: string
:param combo: dict
:param tag: The tag of the asset list.
:type tag: string
:param description: Description of the asset list.
:type description: string | Below is the the instruction that describes the task:
### Input:
asset_create_combo name, combination, tag, description
Creates a new combination asset list. Operands can be either asset list
IDs or be a nested combination asset list.
UN-DOCUMENTED CALL: This function is not considered stable.
AND = intersection
OR = union
operand = asset list ID or nested combination.
operator = intersection or union.
Example:
combo = {
'operand1': {
'operand1': '2',
'operand2': '2',
'operation': 'union',
},
'operand2': '3',
'operation': 'intersection'
}
:param name: Name of the asset list.
:type name: string
:param combo: dict
:param tag: The tag of the asset list.
:type tag: string
:param description: Description of the asset list.
:type description: string
### Response:
def asset_create_combo(self, name, combo, tag='', description=''):
'''asset_create_combo name, combination, tag, description
Creates a new combination asset list. Operands can be either asset list
IDs or be a nested combination asset list.
UN-DOCUMENTED CALL: This function is not considered stable.
AND = intersection
OR = union
operand = asset list ID or nested combination.
operator = intersection or union.
Example:
combo = {
'operand1': {
'operand1': '2',
'operand2': '2',
'operation': 'union',
},
'operand2': '3',
'operation': 'intersection'
}
:param name: Name of the asset list.
:type name: string
:param combo: dict
:param tag: The tag of the asset list.
:type tag: string
:param description: Description of the asset list.
:type description: string
'''
return self.raw_query('asset', 'add', data={
'name': name,
'description': description,
'type': 'combination',
'combinations': combo,
}) |
def fix_variable(self, v, value):
"""Fix the value of a variable and remove it from the constraint.
Args:
v (variable):
Variable in the constraint to be set to a constant value.
val (int):
Value assigned to the variable. Values must match the :class:`.Vartype` of the
constraint.
Examples:
This example creates a constraint that :math:`a \\ne b` on binary variables,
fixes variable a to 0, and tests two candidate solutions.
>>> import dwavebinarycsp
>>> const = dwavebinarycsp.Constraint.from_func(operator.ne,
... ['a', 'b'], dwavebinarycsp.BINARY)
>>> const.fix_variable('a', 0)
>>> const.check({'b': 1})
True
>>> const.check({'b': 0})
False
"""
variables = self.variables
try:
idx = variables.index(v)
except ValueError:
raise ValueError("given variable {} is not part of the constraint".format(v))
if value not in self.vartype.value:
raise ValueError("expected value to be in {}, received {} instead".format(self.vartype.value, value))
configurations = frozenset(config[:idx] + config[idx + 1:] # exclude the fixed var
for config in self.configurations
if config[idx] == value)
if not configurations:
raise UnsatError("fixing {} to {} makes this constraint unsatisfiable".format(v, value))
variables = variables[:idx] + variables[idx + 1:]
self.configurations = configurations
self.variables = variables
def func(*args): return args in configurations
self.func = func
self.name = '{} ({} fixed to {})'.format(self.name, v, value) | Fix the value of a variable and remove it from the constraint.
Args:
v (variable):
Variable in the constraint to be set to a constant value.
val (int):
Value assigned to the variable. Values must match the :class:`.Vartype` of the
constraint.
Examples:
This example creates a constraint that :math:`a \\ne b` on binary variables,
fixes variable a to 0, and tests two candidate solutions.
>>> import dwavebinarycsp
>>> const = dwavebinarycsp.Constraint.from_func(operator.ne,
... ['a', 'b'], dwavebinarycsp.BINARY)
>>> const.fix_variable('a', 0)
>>> const.check({'b': 1})
True
>>> const.check({'b': 0})
False | Below is the the instruction that describes the task:
### Input:
Fix the value of a variable and remove it from the constraint.
Args:
v (variable):
Variable in the constraint to be set to a constant value.
val (int):
Value assigned to the variable. Values must match the :class:`.Vartype` of the
constraint.
Examples:
This example creates a constraint that :math:`a \\ne b` on binary variables,
fixes variable a to 0, and tests two candidate solutions.
>>> import dwavebinarycsp
>>> const = dwavebinarycsp.Constraint.from_func(operator.ne,
... ['a', 'b'], dwavebinarycsp.BINARY)
>>> const.fix_variable('a', 0)
>>> const.check({'b': 1})
True
>>> const.check({'b': 0})
False
### Response:
def fix_variable(self, v, value):
"""Fix the value of a variable and remove it from the constraint.
Args:
v (variable):
Variable in the constraint to be set to a constant value.
val (int):
Value assigned to the variable. Values must match the :class:`.Vartype` of the
constraint.
Examples:
This example creates a constraint that :math:`a \\ne b` on binary variables,
fixes variable a to 0, and tests two candidate solutions.
>>> import dwavebinarycsp
>>> const = dwavebinarycsp.Constraint.from_func(operator.ne,
... ['a', 'b'], dwavebinarycsp.BINARY)
>>> const.fix_variable('a', 0)
>>> const.check({'b': 1})
True
>>> const.check({'b': 0})
False
"""
variables = self.variables
try:
idx = variables.index(v)
except ValueError:
raise ValueError("given variable {} is not part of the constraint".format(v))
if value not in self.vartype.value:
raise ValueError("expected value to be in {}, received {} instead".format(self.vartype.value, value))
configurations = frozenset(config[:idx] + config[idx + 1:] # exclude the fixed var
for config in self.configurations
if config[idx] == value)
if not configurations:
raise UnsatError("fixing {} to {} makes this constraint unsatisfiable".format(v, value))
variables = variables[:idx] + variables[idx + 1:]
self.configurations = configurations
self.variables = variables
def func(*args): return args in configurations
self.func = func
self.name = '{} ({} fixed to {})'.format(self.name, v, value) |
def _merge_prims(prims, *, debug=False, stagenames=None, stages=None):
"""Helper method to greedily combine Frames (of Primitives) or Primitives based on the rules defined in the Primitive's class.
Used by a CommandQueue during compilation and optimization of
Primitives.
Args:
prims: A list or FrameSequence of Primitives or Frames (respectively) to try to merge together.
debug: A boolean for if debug information should be generated.
stages: A list to be edited by this method to store snapshots of the compilation state. Used if debug is True.
stagenames: A list of strings describing each debug snapshot of the compilation process. Used if debug is True.
Returns:
A list or FrameSequence (the same type as prims) of the combined Primitives or Frames.
"""
if isinstance(prims, FrameSequence):
merged_prims = FrameSequence(prims._chain)
else:
merged_prims = []
working_prim = prims[0]
i = 1
logging_tmp = []
while i < len(prims):
tmp = prims[i]
res = working_prim.merge(tmp)
if res is not None:
working_prim = res
if debug:#pragma: no cover
logging_tmp.append(
[p.snapshot() for p in
merged_prims+[working_prim]])
else:
merged_prims.append(working_prim)
working_prim = tmp
i += 1
merged_prims.append(working_prim)
if debug:#pragma: no cover
stages.append(logging_tmp)
stagenames.append("Merge intermediate states")
return merged_prims | Helper method to greedily combine Frames (of Primitives) or Primitives based on the rules defined in the Primitive's class.
Used by a CommandQueue during compilation and optimization of
Primitives.
Args:
prims: A list or FrameSequence of Primitives or Frames (respectively) to try to merge together.
debug: A boolean for if debug information should be generated.
stages: A list to be edited by this method to store snapshots of the compilation state. Used if debug is True.
stagenames: A list of strings describing each debug snapshot of the compilation process. Used if debug is True.
Returns:
A list or FrameSequence (the same type as prims) of the combined Primitives or Frames. | Below is the the instruction that describes the task:
### Input:
Helper method to greedily combine Frames (of Primitives) or Primitives based on the rules defined in the Primitive's class.
Used by a CommandQueue during compilation and optimization of
Primitives.
Args:
prims: A list or FrameSequence of Primitives or Frames (respectively) to try to merge together.
debug: A boolean for if debug information should be generated.
stages: A list to be edited by this method to store snapshots of the compilation state. Used if debug is True.
stagenames: A list of strings describing each debug snapshot of the compilation process. Used if debug is True.
Returns:
A list or FrameSequence (the same type as prims) of the combined Primitives or Frames.
### Response:
def _merge_prims(prims, *, debug=False, stagenames=None, stages=None):
"""Helper method to greedily combine Frames (of Primitives) or Primitives based on the rules defined in the Primitive's class.
Used by a CommandQueue during compilation and optimization of
Primitives.
Args:
prims: A list or FrameSequence of Primitives or Frames (respectively) to try to merge together.
debug: A boolean for if debug information should be generated.
stages: A list to be edited by this method to store snapshots of the compilation state. Used if debug is True.
stagenames: A list of strings describing each debug snapshot of the compilation process. Used if debug is True.
Returns:
A list or FrameSequence (the same type as prims) of the combined Primitives or Frames.
"""
if isinstance(prims, FrameSequence):
merged_prims = FrameSequence(prims._chain)
else:
merged_prims = []
working_prim = prims[0]
i = 1
logging_tmp = []
while i < len(prims):
tmp = prims[i]
res = working_prim.merge(tmp)
if res is not None:
working_prim = res
if debug:#pragma: no cover
logging_tmp.append(
[p.snapshot() for p in
merged_prims+[working_prim]])
else:
merged_prims.append(working_prim)
working_prim = tmp
i += 1
merged_prims.append(working_prim)
if debug:#pragma: no cover
stages.append(logging_tmp)
stagenames.append("Merge intermediate states")
return merged_prims |
def _get_corr_stddevs(C, tau_ss, stddev_types, num_sites, phi_ss, NL=None,
tau_value=None):
"""
Return standard deviations adjusted for single station sigma
as the total standard deviation - as proposed to be used in
the Swiss Hazard Model [2014].
"""
stddevs = []
temp_stddev = phi_ss * phi_ss
if tau_value is not None and NL is not None:
temp_stddev = temp_stddev + tau_value * tau_value * ((1 + NL) ** 2)
else:
temp_stddev = temp_stddev + C[tau_ss] * C[tau_ss]
for stddev_type in stddev_types:
if stddev_type == const.StdDev.TOTAL:
stddevs.append(np.sqrt(temp_stddev) + np.zeros(num_sites))
return stddevs | Return standard deviations adjusted for single station sigma
as the total standard deviation - as proposed to be used in
the Swiss Hazard Model [2014]. | Below is the the instruction that describes the task:
### Input:
Return standard deviations adjusted for single station sigma
as the total standard deviation - as proposed to be used in
the Swiss Hazard Model [2014].
### Response:
def _get_corr_stddevs(C, tau_ss, stddev_types, num_sites, phi_ss, NL=None,
tau_value=None):
"""
Return standard deviations adjusted for single station sigma
as the total standard deviation - as proposed to be used in
the Swiss Hazard Model [2014].
"""
stddevs = []
temp_stddev = phi_ss * phi_ss
if tau_value is not None and NL is not None:
temp_stddev = temp_stddev + tau_value * tau_value * ((1 + NL) ** 2)
else:
temp_stddev = temp_stddev + C[tau_ss] * C[tau_ss]
for stddev_type in stddev_types:
if stddev_type == const.StdDev.TOTAL:
stddevs.append(np.sqrt(temp_stddev) + np.zeros(num_sites))
return stddevs |
def _vagrant_ssh_config(vm_):
'''
get the information for ssh communication from the new VM
:param vm_: the VM's info as we have it now
:return: dictionary of ssh stuff
'''
machine = vm_['machine']
log.info('requesting vagrant ssh-config for VM %s', machine or '(default)')
cmd = 'vagrant ssh-config {}'.format(machine)
reply = __salt__['cmd.shell'](cmd,
runas=vm_.get('runas'),
cwd=vm_.get('cwd'),
ignore_retcode=True)
ssh_config = {}
for line in reply.split('\n'): # build a dictionary of the text reply
tokens = line.strip().split()
if len(tokens) == 2: # each two-token line becomes a key:value pair
ssh_config[tokens[0]] = tokens[1]
log.debug('ssh_config=%s', repr(ssh_config))
return ssh_config | get the information for ssh communication from the new VM
:param vm_: the VM's info as we have it now
:return: dictionary of ssh stuff | Below is the the instruction that describes the task:
### Input:
get the information for ssh communication from the new VM
:param vm_: the VM's info as we have it now
:return: dictionary of ssh stuff
### Response:
def _vagrant_ssh_config(vm_):
'''
get the information for ssh communication from the new VM
:param vm_: the VM's info as we have it now
:return: dictionary of ssh stuff
'''
machine = vm_['machine']
log.info('requesting vagrant ssh-config for VM %s', machine or '(default)')
cmd = 'vagrant ssh-config {}'.format(machine)
reply = __salt__['cmd.shell'](cmd,
runas=vm_.get('runas'),
cwd=vm_.get('cwd'),
ignore_retcode=True)
ssh_config = {}
for line in reply.split('\n'): # build a dictionary of the text reply
tokens = line.strip().split()
if len(tokens) == 2: # each two-token line becomes a key:value pair
ssh_config[tokens[0]] = tokens[1]
log.debug('ssh_config=%s', repr(ssh_config))
return ssh_config |
def get_version():
"""Reads the version (MAJOR.MINOR) from this module."""
release = get_release()
split_version = release.split(".")
if len(split_version) == 3:
return ".".join(split_version[:2])
return release | Reads the version (MAJOR.MINOR) from this module. | Below is the the instruction that describes the task:
### Input:
Reads the version (MAJOR.MINOR) from this module.
### Response:
def get_version():
"""Reads the version (MAJOR.MINOR) from this module."""
release = get_release()
split_version = release.split(".")
if len(split_version) == 3:
return ".".join(split_version[:2])
return release |
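A quick sketch of the intended behaviour of get_version above, with get_release stubbed out (the stub value below is illustrative only):
import re

def _stub_get_release():
    # illustrative stand-in for the module's real get_release()
    return "2.7.1"

def _version_from_release(release):
    # same rule as get_version above: drop the patch part of MAJOR.MINOR.PATCH
    split_version = release.split(".")
    if len(split_version) == 3:
        return ".".join(split_version[:2])
    return release

print(_version_from_release(_stub_get_release()))  # 2.7
print(_version_from_release("2.7"))                # 2.7 (returned unchanged)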
def warn(self, msg, whitespace_strp=True):
"""
For things that have gone seriously wrong but don't merit a program
halt.
Outputs to stderr, so JsonOutput does not need to override.
@param msg: warning to output.
@param whitespace_strp: whether to strip whitespace.
"""
if self.errors_display:
if whitespace_strp:
msg = strip_whitespace(msg)
if not self.log_to_file:
msg = colors['warn'] + "[+] " + msg + colors['endc']
else:
msg = "[" + time.strftime("%c") + "] " + msg
self.print(msg, file=self.error_log) | For things that have gone seriously wrong but don't merit a program
halt.
Outputs to stderr, so JsonOutput does not need to override.
@param msg: warning to output.
@param whitespace_strp: whether to strip whitespace. | Below is the the instruction that describes the task:
### Input:
For things that have gone seriously wrong but don't merit a program
halt.
Outputs to stderr, so JsonOutput does not need to override.
@param msg: warning to output.
@param whitespace_strp: whether to strip whitespace.
### Response:
def warn(self, msg, whitespace_strp=True):
"""
For things that have gone seriously wrong but don't merit a program
halt.
Outputs to stderr, so JsonOutput does not need to override.
@param msg: warning to output.
@param whitespace_strp: whether to strip whitespace.
"""
if self.errors_display:
if whitespace_strp:
msg = strip_whitespace(msg)
if not self.log_to_file:
msg = colors['warn'] + "[+] " + msg + colors['endc']
else:
msg = "[" + time.strftime("%c") + "] " + msg
self.print(msg, file=self.error_log) |
def ts_to_df(metadata):
"""
Create a data frame from one TimeSeries object
:param dict metadata: Time Series dictionary
:return dict: One data frame per table, organized in a dictionary by name
"""
logger_dataframes.info("enter ts_to_df")
dfs = {}
# Plot the variable + values vs year, age, depth (whichever are available)
dfs["paleoData"] = pd.DataFrame(_plot_ts_cols(metadata))
# Plot the chronology variables + values in a data frame
dfs["chronData"] = _get_key_data(metadata, "chronData_df")
# Take out the chronData pandas data frame object if it exists in the metadata
# Otherwise, the data frame renderer gets crazy and errors out.
if "chronData_df" in metadata:
del metadata["chronData_df"]
s = collections.OrderedDict(sorted(metadata.items()))
# Put key-vars in a data frame to make it easier to visualize
dfs["metadata"] = pd.DataFrame(list(s.items()), columns=['Key', 'Value'])
logger_dataframes.info("exit ts_to_df")
return dfs | Create a data frame from one TimeSeries object
:param dict metadata: Time Series dictionary
:return dict: One data frame per table, organized in a dictionary by name | Below is the the instruction that describes the task:
### Input:
Create a data frame from one TimeSeries object
:param dict metadata: Time Series dictionary
:return dict: One data frame per table, organized in a dictionary by name
### Response:
def ts_to_df(metadata):
"""
Create a data frame from one TimeSeries object
:param dict metadata: Time Series dictionary
:return dict: One data frame per table, organized in a dictionary by name
"""
logger_dataframes.info("enter ts_to_df")
dfs = {}
# Plot the variable + values vs year, age, depth (whichever are available)
dfs["paleoData"] = pd.DataFrame(_plot_ts_cols(metadata))
# Plot the chronology variables + values in a data frame
dfs["chronData"] = _get_key_data(metadata, "chronData_df")
# Take out the chronData pandas data frame object if it exists in the metadata
# Otherwise, the data frame renderer gets crazy and errors out.
if "chronData_df" in metadata:
del metadata["chronData_df"]
s = collections.OrderedDict(sorted(metadata.items()))
# Put key-vars in a data frame to make it easier to visualize
dfs["metadata"] = pd.DataFrame(list(s.items()), columns=['Key', 'Value'])
logger_dataframes.info("exit ts_to_df")
return dfs |
def set_defaults(self, config_file):
"""Set defaults.
"""
self.defaults = Defaults(config_file)
self.python = Python()
self.setuptools = Setuptools()
self.docutils = Docutils()
self.styles = self.defaults.styles
self.browser = self.defaults.browser
self.list = False | Set defaults. | Below is the the instruction that describes the task:
### Input:
Set defaults.
### Response:
def set_defaults(self, config_file):
"""Set defaults.
"""
self.defaults = Defaults(config_file)
self.python = Python()
self.setuptools = Setuptools()
self.docutils = Docutils()
self.styles = self.defaults.styles
self.browser = self.defaults.browser
self.list = False |
def null_advance(self, blocksize):
"""Advance and insert zeros
Parameters
----------
blocksize: int
The number of seconds to attempt to read from the channel
"""
self.raw_buffer.roll(-int(blocksize * self.raw_sample_rate))
self.read_pos += blocksize
self.raw_buffer.start_time += blocksize | Advance and insert zeros
Parameters
----------
blocksize: int
The number of seconds to attempt to read from the channel | Below is the the instruction that describes the task:
### Input:
Advance and insert zeros
Parameters
----------
blocksize: int
The number of seconds to attempt to read from the channel
### Response:
def null_advance(self, blocksize):
"""Advance and insert zeros
Parameters
----------
blocksize: int
The number of seconds to attempt to read from the channel
"""
self.raw_buffer.roll(-int(blocksize * self.raw_sample_rate))
self.read_pos += blocksize
self.raw_buffer.start_time += blocksize |
def get(self):
"""
Constructs a ExecutionContextContext
:returns: twilio.rest.studio.v1.flow.execution.execution_context.ExecutionContextContext
:rtype: twilio.rest.studio.v1.flow.execution.execution_context.ExecutionContextContext
"""
return ExecutionContextContext(
self._version,
flow_sid=self._solution['flow_sid'],
execution_sid=self._solution['execution_sid'],
) | Constructs a ExecutionContextContext
:returns: twilio.rest.studio.v1.flow.execution.execution_context.ExecutionContextContext
:rtype: twilio.rest.studio.v1.flow.execution.execution_context.ExecutionContextContext | Below is the the instruction that describes the task:
### Input:
Constructs a ExecutionContextContext
:returns: twilio.rest.studio.v1.flow.execution.execution_context.ExecutionContextContext
:rtype: twilio.rest.studio.v1.flow.execution.execution_context.ExecutionContextContext
### Response:
def get(self):
"""
Constructs a ExecutionContextContext
:returns: twilio.rest.studio.v1.flow.execution.execution_context.ExecutionContextContext
:rtype: twilio.rest.studio.v1.flow.execution.execution_context.ExecutionContextContext
"""
return ExecutionContextContext(
self._version,
flow_sid=self._solution['flow_sid'],
execution_sid=self._solution['execution_sid'],
) |
def _convert_before_2_0_0_b3(self, dynamips_id):
"""
Before 2.0.0 beta3 the node didn't have a folder by node
when we start we move the file, we can't do it in the topology
conversion due to case of remote servers
"""
dynamips_dir = self.project.module_working_directory(self.manager.module_name.lower())
for path in glob.glob(os.path.join(glob.escape(dynamips_dir), "configs", "i{}_*".format(dynamips_id))):
dst = os.path.join(self._working_directory, "configs", os.path.basename(path))
if not os.path.exists(dst):
try:
shutil.move(path, dst)
except OSError as e:
raise DynamipsError("Can't move {}: {}".format(path, str(e)))
for path in glob.glob(os.path.join(glob.escape(dynamips_dir), "*_i{}_*".format(dynamips_id))):
dst = os.path.join(self._working_directory, os.path.basename(path))
if not os.path.exists(dst):
try:
shutil.move(path, dst)
except OSError as e:
raise DynamipsError("Can't move {}: {}".format(path, str(e))) | Before 2.0.0 beta3 the node didn't have a folder by node
when we start we move the file, we can't do it in the topology
conversion due to case of remote servers | Below is the the instruction that describes the task:
### Input:
Before 2.0.0 beta3 the node didn't have a folder by node
when we start we move the file, we can't do it in the topology
conversion due to case of remote servers
### Response:
def _convert_before_2_0_0_b3(self, dynamips_id):
"""
Before 2.0.0 beta3 the node didn't have a folder by node
when we start we move the file, we can't do it in the topology
conversion due to case of remote servers
"""
dynamips_dir = self.project.module_working_directory(self.manager.module_name.lower())
for path in glob.glob(os.path.join(glob.escape(dynamips_dir), "configs", "i{}_*".format(dynamips_id))):
dst = os.path.join(self._working_directory, "configs", os.path.basename(path))
if not os.path.exists(dst):
try:
shutil.move(path, dst)
except OSError as e:
raise DynamipsError("Can't move {}: {}".format(path, str(e)))
for path in glob.glob(os.path.join(glob.escape(dynamips_dir), "*_i{}_*".format(dynamips_id))):
dst = os.path.join(self._working_directory, os.path.basename(path))
if not os.path.exists(dst):
try:
shutil.move(path, dst)
except OSError as e:
raise DynamipsError("Can't move {}: {}".format(path, str(e))) |
def censor_background(sample_frame, ntc_samples=['NTC'], margin=log2(10)):
"""Selects rows from the sample data frame that fall `margin` or greater
cycles earlier than the NTC for that target. NTC wells are recognized by
string matching against the Sample column.
:param DataFrame sample_frame: A sample data frame.
:param iterable ntc_samples: A sequence of strings giving the sample names of your NTC wells, i.e. ['NTC']
:param float margin: The number of cycles earlier than the NTC for a "good" sample, i.e. log2(10)
:return: a view of the sample data frame containing only non-background rows
:rtype: DataFrame
"""
ntcs = sample_frame.loc[ sample_frame['Sample'].apply(lambda x: x in ntc_samples), ]
if ntcs.empty:
return sample_frame
g = ntcs.groupby('Target')
min_ntcs = g['Cq'].min()
# if a target has no NTC, min_ntcs.loc[sample] is NaN
# we should retain all values from targets with no NTC
# all comparisons with NaN are false
# so we test for the "wrong" condition and invert the result
censored = sample_frame.loc[ ~(sample_frame['Cq'] > (min_ntcs.loc[sample_frame['Target']] - margin)) ]
return censored | Selects rows from the sample data frame that fall `margin` or greater
cycles earlier than the NTC for that target. NTC wells are recognized by
string matching against the Sample column.
:param DataFrame sample_frame: A sample data frame.
:param iterable ntc_samples: A sequence of strings giving the sample names of your NTC wells, i.e. ['NTC']
:param float margin: The number of cycles earlier than the NTC for a "good" sample, i.e. log2(10)
:return: a view of the sample data frame containing only non-background rows
:rtype: DataFrame | Below is the the instruction that describes the task:
### Input:
Selects rows from the sample data frame that fall `margin` or greater
cycles earlier than the NTC for that target. NTC wells are recognized by
string matching against the Sample column.
:param DataFrame sample_frame: A sample data frame.
:param iterable ntc_samples: A sequence of strings giving the sample names of your NTC wells, i.e. ['NTC']
:param float margin: The number of cycles earlier than the NTC for a "good" sample, i.e. log2(10)
:return: a view of the sample data frame containing only non-background rows
:rtype: DataFrame
### Response:
def censor_background(sample_frame, ntc_samples=['NTC'], margin=log2(10)):
"""Selects rows from the sample data frame that fall `margin` or greater
cycles earlier than the NTC for that target. NTC wells are recognized by
string matching against the Sample column.
:param DataFrame sample_frame: A sample data frame.
:param iterable ntc_samples: A sequence of strings giving the sample names of your NTC wells, i.e. ['NTC']
:param float margin: The number of cycles earlier than the NTC for a "good" sample, i.e. log2(10)
:return: a view of the sample data frame containing only non-background rows
:rtype: DataFrame
"""
ntcs = sample_frame.loc[ sample_frame['Sample'].apply(lambda x: x in ntc_samples), ]
if ntcs.empty:
return sample_frame
g = ntcs.groupby('Target')
min_ntcs = g['Cq'].min()
# if a target has no NTC, min_ntcs.loc[sample] is NaN
# we should retain all values from targets with no NTC
# all comparisons with NaN are false
# so we test for the "wrong" condition and invert the result
censored = sample_frame.loc[ ~(sample_frame['Cq'] > (min_ntcs.loc[sample_frame['Target']] - margin)) ]
return censored |
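The censoring rule above can be sanity-checked on a made-up plate; the following is a long-hand restatement of the same logic that assumes only pandas and numpy and does not call into the original module:
import pandas as pd
from numpy import log2

plate = pd.DataFrame({
    'Sample': ['treated', 'untreated', 'NTC'],
    'Target': ['geneA', 'geneA', 'geneA'],
    'Cq': [22.0, 35.0, 36.0],
})

# Keep a well only if its Cq is at least log2(10) ~ 3.32 cycles earlier than
# the earliest NTC Cq for its target.
margin = log2(10)
ntc_cq = plate[plate['Sample'] == 'NTC'].groupby('Target')['Cq'].min()
cutoff = plate['Target'].map(ntc_cq) - margin
print(plate[~(plate['Cq'] > cutoff)]['Sample'].tolist())  # ['treated']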
def clustering_coef_bu(G):
'''
The clustering coefficient is the fraction of triangles around a node
(equiv. the fraction of a node's neighbors that are neighbors of each other).
Parameters
----------
A : NxN np.ndarray
binary undirected connection matrix
Returns
-------
C : Nx1 np.ndarray
clustering coefficient vector
'''
n = len(G)
C = np.zeros((n,))
for u in range(n):
V, = np.where(G[u, :])
k = len(V)
if k >= 2: # degree must be at least 2
S = G[np.ix_(V, V)]
C[u] = np.sum(S) / (k * k - k)
return C | The clustering coefficient is the fraction of triangles around a node
(equiv. the fraction of a node's neighbors that are neighbors of each other).
Parameters
----------
A : NxN np.ndarray
binary undirected connection matrix
Returns
-------
C : Nx1 np.ndarray
clustering coefficient vector | Below is the the instruction that describes the task:
### Input:
The clustering coefficient is the fraction of triangles around a node
(equiv. the fraction of a node's neighbors that are neighbors of each other).
Parameters
----------
A : NxN np.ndarray
binary undirected connection matrix
Returns
-------
C : Nx1 np.ndarray
clustering coefficient vector
### Response:
def clustering_coef_bu(G):
'''
The clustering coefficient is the fraction of triangles around a node
(equiv. the fraction of a node's neighbors that are neighbors of each other).
Parameters
----------
A : NxN np.ndarray
binary undirected connection matrix
Returns
-------
C : Nx1 np.ndarray
clustering coefficient vector
'''
n = len(G)
C = np.zeros((n,))
for u in range(n):
V, = np.where(G[u, :])
k = len(V)
if k >= 2: # degree must be at least 2
S = G[np.ix_(V, V)]
C[u] = np.sum(S) / (k * k - k)
return C |
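A small check of clustering_coef_bu above on a four-node graph, with numpy imported as np (as in the row itself) and the function in scope:
import numpy as np

# Nodes 0-1-2 form a triangle; node 3 is attached only to node 2.
G = np.array([[0, 1, 1, 0],
              [1, 0, 1, 0],
              [1, 1, 0, 1],
              [0, 0, 1, 0]])
print(clustering_coef_bu(G))
# approximately [1., 1., 0.333, 0.]: both neighbours of nodes 0 and 1 are linked,
# only one of node 2's three neighbour pairs is linked, and node 3 has degree 1.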
def get_ht_capability(cap):
"""http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/util.c?id=v3.17#n541.
Positional arguments:
cap -- c_uint16
Returns:
List.
"""
answers = list()
if cap & 1:
answers.append('RX LDPC')
if cap & 2:
answers.append('HT20/HT40')
if not cap & 2:
answers.append('HT20')
if (cap >> 2) & 0x3 == 0:
answers.append('Static SM Power Save')
if (cap >> 2) & 0x3 == 1:
answers.append('Dynamic SM Power Save')
if (cap >> 2) & 0x3 == 3:
answers.append('SM Power Save disabled')
if cap & 16:
answers.append('RX Greenfield')
if cap & 32:
answers.append('RX HT20 SGI')
if cap & 64:
answers.append('RX HT40 SGI')
if cap & 128:
answers.append('TX STBC')
if (cap >> 8) & 0x3 == 0:
answers.append('No RX STBC')
if (cap >> 8) & 0x3 == 1:
answers.append('RX STBC 1-stream')
if (cap >> 8) & 0x3 == 2:
answers.append('RX STBC 2-streams')
if (cap >> 8) & 0x3 == 3:
answers.append('RX STBC 3-streams')
if cap & 1024:
answers.append('HT Delayed Block Ack')
if not cap & 2048:
answers.append('Max AMSDU length: 3839 bytes')
if cap & 2048:
answers.append('Max AMSDU length: 7935 bytes')
if cap & 4096:
answers.append('DSSS/CCK HT40')
if not cap & 4096:
answers.append('No DSSS/CCK HT40')
if cap & 16384:
answers.append('40 MHz Intolerant')
if cap & 32768:
answers.append('L-SIG TXOP protection')
return answers | http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/util.c?id=v3.17#n541.
Positional arguments:
cap -- c_uint16
Returns:
List. | Below is the the instruction that describes the task:
### Input:
http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/util.c?id=v3.17#n541.
Positional arguments:
cap -- c_uint16
Returns:
List.
### Response:
def get_ht_capability(cap):
"""http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/util.c?id=v3.17#n541.
Positional arguments:
cap -- c_uint16
Returns:
List.
"""
answers = list()
if cap & 1:
answers.append('RX LDPC')
if cap & 2:
answers.append('HT20/HT40')
if not cap & 2:
answers.append('HT20')
if (cap >> 2) & 0x3 == 0:
answers.append('Static SM Power Save')
if (cap >> 2) & 0x3 == 1:
answers.append('Dynamic SM Power Save')
if (cap >> 2) & 0x3 == 3:
answers.append('SM Power Save disabled')
if cap & 16:
answers.append('RX Greenfield')
if cap & 32:
answers.append('RX HT20 SGI')
if cap & 64:
answers.append('RX HT40 SGI')
if cap & 128:
answers.append('TX STBC')
if (cap >> 8) & 0x3 == 0:
answers.append('No RX STBC')
if (cap >> 8) & 0x3 == 1:
answers.append('RX STBC 1-stream')
if (cap >> 8) & 0x3 == 2:
answers.append('RX STBC 2-streams')
if (cap >> 8) & 0x3 == 3:
answers.append('RX STBC 3-streams')
if cap & 1024:
answers.append('HT Delayed Block Ack')
if not cap & 2048:
answers.append('Max AMSDU length: 3839 bytes')
if cap & 2048:
answers.append('Max AMSDU length: 7935 bytes')
if cap & 4096:
answers.append('DSSS/CCK HT40')
if not cap & 4096:
answers.append('No DSSS/CCK HT40')
if cap & 16384:
answers.append('40 MHz Intolerant')
if cap & 32768:
answers.append('L-SIG TXOP protection')
return answers |
def is_valid(self, name=None, debug=False):
"""
Check to see if the current xml path is to be processed.
"""
valid_tags = self.action_tree
invalid = False
for item in self.current_tree:
try:
if item in valid_tags or self.ALL_TAGS in valid_tags:
valid_tags = valid_tags[item if item in valid_tags else self.ALL_TAGS]
else:
valid_tags = None
invalid = True
break
except (KeyError, TypeError) as e: # object is either missing the key or is not a dictionary type
invalid = True
break
if debug:
print(name, not invalid and valid_tags is not None)
return not invalid and valid_tags is not None | Check to see if the current xml path is to be processed. | Below is the the instruction that describes the task:
### Input:
Check to see if the current xml path is to be processed.
### Response:
def is_valid(self, name=None, debug=False):
"""
Check to see if the current xml path is to be processed.
"""
valid_tags = self.action_tree
invalid = False
for item in self.current_tree:
try:
if item in valid_tags or self.ALL_TAGS in valid_tags:
valid_tags = valid_tags[item if item in valid_tags else self.ALL_TAGS]
else:
valid_tags = None
invalid = True
break
except (KeyError, TypeError) as e: # object is either missing the key or is not a dictionary type
invalid = True
break
if debug:
print(name, not invalid and valid_tags is not None)
return not invalid and valid_tags is not None |
def software_fibonacci(n):
""" a normal old python function to return the Nth fibonacci number. """
a, b = 0, 1
for i in range(n):
a, b = b, a + b
return a | a normal old python function to return the Nth fibonacci number. | Below is the the instruction that describes the task:
### Input:
a normal old python function to return the Nth fibonacci number.
### Response:
def software_fibonacci(n):
""" a normal old python function to return the Nth fibonacci number. """
a, b = 0, 1
for i in range(n):
a, b = b, a + b
return a |
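Usage is straightforward; a quick check of the first few values of software_fibonacci above:
print([software_fibonacci(n) for n in range(8)])  # [0, 1, 1, 2, 3, 5, 8, 13]
print(software_fibonacci(20))                     # 6765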
def set_context(self, filename):
"""
Provide filename context to airflow task handler.
:param filename: filename in which the dag is located
"""
local_loc = self._init_file(filename)
self.handler = logging.FileHandler(local_loc)
self.handler.setFormatter(self.formatter)
self.handler.setLevel(self.level)
if self._cur_date < datetime.today():
self._symlink_latest_log_directory()
self._cur_date = datetime.today() | Provide filename context to airflow task handler.
:param filename: filename in which the dag is located | Below is the the instruction that describes the task:
### Input:
Provide filename context to airflow task handler.
:param filename: filename in which the dag is located
### Response:
def set_context(self, filename):
"""
Provide filename context to airflow task handler.
:param filename: filename in which the dag is located
"""
local_loc = self._init_file(filename)
self.handler = logging.FileHandler(local_loc)
self.handler.setFormatter(self.formatter)
self.handler.setLevel(self.level)
if self._cur_date < datetime.today():
self._symlink_latest_log_directory()
self._cur_date = datetime.today() |
def is_subdict(self, a,b):
'''
Return True if a is a subdict of b
'''
return all((k in b and b[k]==v) for k,v in a.iteritems()) | Return True if a is a subdict of b | Below is the the instruction that describes the task:
### Input:
Return True if a is a subdict of b
### Response:
def is_subdict(self, a,b):
'''
Return True if a is a subdict of b
'''
return all((k in b and b[k]==v) for k,v in a.iteritems()) |
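The body above relies on the Python 2 dict.iteritems; a standalone Python 3 sketch of the same check:
def is_subdict(a, b):
    # every key/value pair of a must appear unchanged in b
    return all(k in b and b[k] == v for k, v in a.items())

print(is_subdict({'x': 1}, {'x': 1, 'y': 2}))          # True
print(is_subdict({'x': 1, 'z': 3}, {'x': 1, 'y': 2}))  # False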
def insert(self, bs, pos=None):
"""Insert bs at bit position pos.
bs -- The bitstring to insert.
pos -- The bit position to insert at.
Raises ValueError if pos < 0 or pos > self.len.
"""
bs = Bits(bs)
if not bs.len:
return self
if bs is self:
bs = self.__copy__()
if pos is None:
try:
pos = self._pos
except AttributeError:
raise TypeError("insert require a bit position for this type.")
if pos < 0:
pos += self.len
if not 0 <= pos <= self.len:
raise ValueError("Invalid insert position.")
self._insert(bs, pos) | Insert bs at bit position pos.
bs -- The bitstring to insert.
pos -- The bit position to insert at.
Raises ValueError if pos < 0 or pos > self.len. | Below is the the instruction that describes the task:
### Input:
Insert bs at bit position pos.
bs -- The bitstring to insert.
pos -- The bit position to insert at.
Raises ValueError if pos < 0 or pos > self.len.
### Response:
def insert(self, bs, pos=None):
"""Insert bs at bit position pos.
bs -- The bitstring to insert.
pos -- The bit position to insert at.
Raises ValueError if pos < 0 or pos > self.len.
"""
bs = Bits(bs)
if not bs.len:
return self
if bs is self:
bs = self.__copy__()
if pos is None:
try:
pos = self._pos
except AttributeError:
raise TypeError("insert require a bit position for this type.")
if pos < 0:
pos += self.len
if not 0 <= pos <= self.len:
raise ValueError("Invalid insert position.")
self._insert(bs, pos) |
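Assuming the method above is the one shipped with the bitstring package (as the internals suggest), usage would look roughly like this:
from bitstring import BitStream

s = BitStream('0b0000')
s.insert('0b11', 2)   # splice two set bits in at bit position 2
print(s.bin)          # 001100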
def get_collection(self, session, query, api_key):
"""
Fetch a collection of resources of a specified type.
:param session: SQLAlchemy session
:param query: Dict of query args
:param api_type: The type of the model
"""
model = self._fetch_model(api_key)
include = self._parse_include(query.get('include', '').split(','))
fields = self._parse_fields(query)
included = {}
sorts = query.get('sort', '').split(',')
order_by = []
collection = session.query(model)
for attr in sorts:
if attr == '':
break
attr_name, is_asc = [attr[1:], False]\
if attr[0] == '-'\
else [attr, True]
if attr_name not in model.__mapper__.all_orm_descriptors.keys()\
or not hasattr(model, attr_name)\
or attr_name in model.__mapper__.relationships.keys():
return NotSortableError(model, attr_name)
attr = getattr(model, attr_name)
if not hasattr(attr, 'asc'):
# pragma: no cover
return NotSortableError(model, attr_name)
check_permission(model, attr_name, Permissions.VIEW)
order_by.append(attr.asc() if is_asc else attr.desc())
if len(order_by) > 0:
collection = collection.order_by(*order_by)
pos = -1
start, end = self._parse_page(query)
response = JSONAPIResponse()
response.data['data'] = []
for instance in collection:
try:
check_permission(instance, None, Permissions.VIEW)
except PermissionDeniedError:
continue
pos += 1
if end is not None and (pos < start or pos > end):
continue
built = self._render_full_resource(instance, include, fields)
included.update(built.pop('included'))
response.data['data'].append(built)
response.data['included'] = list(included.values())
return response | Fetch a collection of resources of a specified type.
:param session: SQLAlchemy session
:param query: Dict of query args
:param api_type: The type of the model | Below is the the instruction that describes the task:
### Input:
Fetch a collection of resources of a specified type.
:param session: SQLAlchemy session
:param query: Dict of query args
:param api_type: The type of the model
### Response:
def get_collection(self, session, query, api_key):
"""
Fetch a collection of resources of a specified type.
:param session: SQLAlchemy session
:param query: Dict of query args
:param api_type: The type of the model
"""
model = self._fetch_model(api_key)
include = self._parse_include(query.get('include', '').split(','))
fields = self._parse_fields(query)
included = {}
sorts = query.get('sort', '').split(',')
order_by = []
collection = session.query(model)
for attr in sorts:
if attr == '':
break
attr_name, is_asc = [attr[1:], False]\
if attr[0] == '-'\
else [attr, True]
if attr_name not in model.__mapper__.all_orm_descriptors.keys()\
or not hasattr(model, attr_name)\
or attr_name in model.__mapper__.relationships.keys():
return NotSortableError(model, attr_name)
attr = getattr(model, attr_name)
if not hasattr(attr, 'asc'):
# pragma: no cover
return NotSortableError(model, attr_name)
check_permission(model, attr_name, Permissions.VIEW)
order_by.append(attr.asc() if is_asc else attr.desc())
if len(order_by) > 0:
collection = collection.order_by(*order_by)
pos = -1
start, end = self._parse_page(query)
response = JSONAPIResponse()
response.data['data'] = []
for instance in collection:
try:
check_permission(instance, None, Permissions.VIEW)
except PermissionDeniedError:
continue
pos += 1
if end is not None and (pos < start or pos > end):
continue
built = self._render_full_resource(instance, include, fields)
included.update(built.pop('included'))
response.data['data'].append(built)
response.data['included'] = list(included.values())
return response |
def add_child(self, child):
"""
Adds self as parent to child, and then adds child.
"""
child.parent = self
self.children.append(child)
return child | Adds self as parent to child, and then adds child. | Below is the the instruction that describes the task:
### Input:
Adds self as parent to child, and then adds child.
### Response:
def add_child(self, child):
"""
Adds self as parent to child, and then adds child.
"""
child.parent = self
self.children.append(child)
return child |
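A minimal stand-in for the host class (the Node name and its parent/children attributes are assumptions) shows the intent of add_child above:
class Node:
    def __init__(self, name):
        self.name = name
        self.parent = None
        self.children = []

    def add_child(self, child):
        # same body as above: wire up the back-reference, then attach
        child.parent = self
        self.children.append(child)
        return child

root = Node('root')
leaf = root.add_child(Node('leaf'))
print(leaf.parent.name, [c.name for c in root.children])  # root ['leaf']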
def pluck(self, key):
"""
Convenience version of a common use case of
`map`: fetching a property.
"""
return self._wrap([x.get(key) for x in self.obj]) | Convenience version of a common use case of
`map`: fetching a property. | Below is the the instruction that describes the task:
### Input:
Convenience version of a common use case of
`map`: fetching a property.
### Response:
def pluck(self, key):
"""
Convenience version of a common use case of
`map`: fetching a property.
"""
return self._wrap([x.get(key) for x in self.obj]) |
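pluck above lives on an underscore.py-style wrapper (self.obj, self._wrap); a free-function sketch of the same idea:
def pluck(objs, key):
    # fetch one property from every mapping in the sequence
    return [x.get(key) for x in objs]

people = [{'name': 'ada'}, {'name': 'grace'}, {'age': 36}]
print(pluck(people, 'name'))  # ['ada', 'grace', None]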
def drawBackground( self, painter, rect ):
"""
Draws the backgrounds for the different chart types.
:param painter | <QPainter>
rect | <QRect>
"""
if ( self._dirty ):
self.rebuild()
if ( self.showGrid() ):
self.drawGrid(painter) | Draws the backgrounds for the different chart types.
:param painter | <QPainter>
rect | <QRect> | Below is the the instruction that describes the task:
### Input:
Draws the backgrounds for the different chart types.
:param painter | <QPainter>
rect | <QRect>
### Response:
def drawBackground( self, painter, rect ):
"""
Draws the backgrounds for the different chart types.
:param painter | <QPainter>
rect | <QRect>
"""
if ( self._dirty ):
self.rebuild()
if ( self.showGrid() ):
self.drawGrid(painter) |
def _or_query(self, term_list, field, field_type):
"""
Joins each item of term_list decorated by _term_query with an OR.
"""
term_list = [self._term_query(term, field, field_type) for term in term_list]
return xapian.Query(xapian.Query.OP_OR, term_list) | Joins each item of term_list decorated by _term_query with an OR. | Below is the the instruction that describes the task:
### Input:
Joins each item of term_list decorated by _term_query with an OR.
### Response:
def _or_query(self, term_list, field, field_type):
"""
Joins each item of term_list decorated by _term_query with an OR.
"""
term_list = [self._term_query(term, field, field_type) for term in term_list]
return xapian.Query(xapian.Query.OP_OR, term_list) |
def has_insert(self, shape):
"""Returns True if any of the inserts have the given shape."""
for insert in self.inserts:
if insert.shape == shape:
return True
return False | Returns True if any of the inserts have the given shape. | Below is the the instruction that describes the task:
### Input:
Returns True if any of the inserts have the given shape.
### Response:
def has_insert(self, shape):
"""Returns True if any of the inserts have the given shape."""
for insert in self.inserts:
if insert.shape == shape:
return True
return False |
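With a throwaway stand-in for the insert objects (only a .shape attribute is needed), has_insert above behaves like this:
from types import SimpleNamespace

class Board:
    # minimal host: anything exposing an `inserts` iterable works
    def __init__(self, inserts):
        self.inserts = inserts

    def has_insert(self, shape):
        for insert in self.inserts:
            if insert.shape == shape:
                return True
        return False

board = Board([SimpleNamespace(shape='hex'), SimpleNamespace(shape='square')])
print(board.has_insert('hex'), board.has_insert('round'))  # True False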
def _gen_ticket(prefix=None, lg=settings.CAS_TICKET_LEN):
"""
Generate a ticket with prefix ``prefix`` and length ``lg``
:param unicode prefix: An optional prefix (probably ST, PT, PGT or PGTIOU)
:param int lg: The length of the generated ticket (with the prefix)
:return: A randomly generated ticket of length ``lg``
:rtype: unicode
"""
random_part = u''.join(
random.choice(
string.ascii_letters + string.digits
) for _ in range(lg - len(prefix or "") - 1)
)
if prefix is not None:
return u'%s-%s' % (prefix, random_part)
else:
return random_part | Generate a ticket with prefix ``prefix`` and length ``lg``
:param unicode prefix: An optional prefix (probably ST, PT, PGT or PGTIOU)
:param int lg: The length of the generated ticket (with the prefix)
:return: A randomly generated ticket of length ``lg``
:rtype: unicode | Below is the the instruction that describes the task:
### Input:
Generate a ticket with prefix ``prefix`` and length ``lg``
:param unicode prefix: An optional prefix (probably ST, PT, PGT or PGTIOU)
:param int lg: The length of the generated ticket (with the prefix)
:return: A randomly generated ticket of length ``lg``
:rtype: unicode
### Response:
def _gen_ticket(prefix=None, lg=settings.CAS_TICKET_LEN):
"""
Generate a ticket with prefix ``prefix`` and length ``lg``
:param unicode prefix: An optional prefix (probably ST, PT, PGT or PGTIOU)
:param int lg: The length of the generated ticket (with the prefix)
:return: A randomly generated ticket of length ``lg``
:rtype: unicode
"""
random_part = u''.join(
random.choice(
string.ascii_letters + string.digits
) for _ in range(lg - len(prefix or "") - 1)
)
if prefix is not None:
return u'%s-%s' % (prefix, random_part)
else:
return random_part |
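A standalone Python 3 sketch of the same ticket generator, with the settings.CAS_TICKET_LEN default replaced by an explicit length (that default length is an assumption):
import random
import string

def gen_ticket(prefix=None, lg=29):
    random_part = ''.join(
        random.choice(string.ascii_letters + string.digits)
        for _ in range(lg - len(prefix or "") - 1)
    )
    # with a prefix, total length (prefix + dash + random part) comes out to lg
    return '%s-%s' % (prefix, random_part) if prefix is not None else random_part

ticket = gen_ticket('ST')
print(ticket, len(ticket))  # e.g. ST-oQ3... 29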
def makerandCIJ_dir(n, k, seed=None):
'''
This function generates a directed random network
Parameters
----------
N : int
number of vertices
K : int
number of edges
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
CIJ : NxN np.ndarray
directed random connection matrix
Notes
-----
no connections are placed on the main diagonal.
'''
rng = get_rng(seed)
ix, = np.where(np.logical_not(np.eye(n)).flat)
rp = rng.permutation(np.size(ix))
CIJ = np.zeros((n, n))
CIJ.flat[ix[rp][:k]] = 1
return CIJ | This function generates a directed random network
Parameters
----------
N : int
number of vertices
K : int
number of edges
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
CIJ : NxN np.ndarray
directed random connection matrix
Notes
-----
no connections are placed on the main diagonal. | Below is the the instruction that describes the task:
### Input:
This function generates a directed random network
Parameters
----------
N : int
number of vertices
K : int
number of edges
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
CIJ : NxN np.ndarray
directed random connection matrix
Notes
-----
no connections are placed on the main diagonal.
### Response:
def makerandCIJ_dir(n, k, seed=None):
'''
This function generates a directed random network
Parameters
----------
N : int
number of vertices
K : int
number of edges
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
CIJ : NxN np.ndarray
directed random connection matrix
Notes
-----
no connections are placed on the main diagonal.
'''
rng = get_rng(seed)
ix, = np.where(np.logical_not(np.eye(n)).flat)
rp = rng.permutation(np.size(ix))
CIJ = np.zeros((n, n))
CIJ.flat[ix[rp][:k]] = 1
return CIJ |
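A quick check of makerandCIJ_dir above (this assumes the bctpy get_rng helper used in the body is importable alongside it):
import numpy as np

CIJ = makerandCIJ_dir(10, 20, seed=42)
print(CIJ.shape, int(CIJ.sum()))  # (10, 10) 20  -- exactly k edges placed
print(np.trace(CIJ))              # 0.0          -- nothing on the main diagonal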
def remove_product_version(self, id, product_version_id, **kwargs):
"""
Removes a product version from the specified config set
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.remove_product_version(id, product_version_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Build configuration set id (required)
:param int product_version_id: Product version id (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.remove_product_version_with_http_info(id, product_version_id, **kwargs)
else:
(data) = self.remove_product_version_with_http_info(id, product_version_id, **kwargs)
return data | Removes a product version from the specified config set
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.remove_product_version(id, product_version_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Build configuration set id (required)
:param int product_version_id: Product version id (required)
:return: None
If the method is called asynchronously,
returns the request thread. | Below is the the instruction that describes the task:
### Input:
Removes a product version from the specified config set
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.remove_product_version(id, product_version_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Build configuration set id (required)
:param int product_version_id: Product version id (required)
:return: None
If the method is called asynchronously,
returns the request thread.
### Response:
def remove_product_version(self, id, product_version_id, **kwargs):
"""
Removes a product version from the specified config set
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.remove_product_version(id, product_version_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: Build configuration set id (required)
:param int product_version_id: Product version id (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.remove_product_version_with_http_info(id, product_version_id, **kwargs)
else:
(data) = self.remove_product_version_with_http_info(id, product_version_id, **kwargs)
return data |
def subtract_metabolites(self, metabolites, combine=True, reversibly=True):
"""Subtract metabolites from a reaction.
That means add the metabolites with -1*coefficient. If the final
coefficient for a metabolite is 0 then the metabolite is removed from
the reaction.
Notes
-----
* A final coefficient < 0 implies a reactant.
* The change is reverted upon exit when using the model as a context.
Parameters
----------
metabolites : dict
Dictionary where the keys are of class Metabolite and the values
are the coefficients. These metabolites will be added to the
reaction.
combine : bool
Describes behavior when a metabolite already exists in the reaction.
True causes the coefficients to be added.
False causes the coefficient to be replaced.
reversibly : bool
Whether to add the change to the context to make the change
reversibly or not (primarily intended for internal use).
"""
self.add_metabolites({
k: -v for k, v in iteritems(metabolites)},
combine=combine, reversibly=reversibly) | Subtract metabolites from a reaction.
That means add the metabolites with -1*coefficient. If the final
coefficient for a metabolite is 0 then the metabolite is removed from
the reaction.
Notes
-----
* A final coefficient < 0 implies a reactant.
* The change is reverted upon exit when using the model as a context.
Parameters
----------
metabolites : dict
Dictionary where the keys are of class Metabolite and the values
are the coefficients. These metabolites will be added to the
reaction.
combine : bool
Describes behavior when a metabolite already exists in the reaction.
True causes the coefficients to be added.
False causes the coefficient to be replaced.
reversibly : bool
Whether to add the change to the context to make the change
reversibly or not (primarily intended for internal use). | Below is the the instruction that describes the task:
### Input:
Subtract metabolites from a reaction.
That means add the metabolites with -1*coefficient. If the final
coefficient for a metabolite is 0 then the metabolite is removed from
the reaction.
Notes
-----
* A final coefficient < 0 implies a reactant.
* The change is reverted upon exit when using the model as a context.
Parameters
----------
metabolites : dict
Dictionary where the keys are of class Metabolite and the values
are the coefficients. These metabolites will be added to the
reaction.
combine : bool
Describes behavior when a metabolite already exists in the reaction.
True causes the coefficients to be added.
False causes the coefficient to be replaced.
reversibly : bool
Whether to add the change to the context to make the change
reversibly or not (primarily intended for internal use).
### Response:
def subtract_metabolites(self, metabolites, combine=True, reversibly=True):
"""Subtract metabolites from a reaction.
That means add the metabolites with -1*coefficient. If the final
coefficient for a metabolite is 0 then the metabolite is removed from
the reaction.
Notes
-----
* A final coefficient < 0 implies a reactant.
* The change is reverted upon exit when using the model as a context.
Parameters
----------
metabolites : dict
Dictionary where the keys are of class Metabolite and the values
are the coefficients. These metabolites will be added to the
reaction.
combine : bool
Describes behavior when a metabolite already exists in the reaction.
True causes the coefficients to be added.
False causes the coefficient to be replaced.
reversibly : bool
Whether to add the change to the context to make the change
reversibly or not (primarily intended for internal use).
"""
self.add_metabolites({
k: -v for k, v in iteritems(metabolites)},
combine=combine, reversibly=reversibly) |
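A hedged cobrapy-style sketch of the behaviour described above (the identifiers are made up, and the API calls are assumed from cobra's public Metabolite/Reaction classes):
from cobra import Metabolite, Reaction

atp = Metabolite('atp_c')
rxn = Reaction('DEMO')
rxn.add_metabolites({atp: -2})
rxn.subtract_metabolites({atp: -1})
print(rxn.metabolites[atp])    # -1  (coefficients are combined by default)
rxn.subtract_metabolites({atp: -1})
print(atp in rxn.metabolites)  # False -- the coefficient reached 0, so atp was dropped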
def create_str(help_string=NO_HELP, default=NO_DEFAULT):
# type: (str, Union[str, NO_DEFAULT_TYPE]) -> str
"""
Create a string parameter
:param help_string:
:param default:
:return:
"""
# noinspection PyTypeChecker
return ParamFunctions(
help_string=help_string,
default=default,
type_name="str",
function_s2t=convert_string_to_string,
function_t2s=convert_string_to_string,
) | Create a string parameter
:param help_string:
:param default:
:return: | Below is the the instruction that describes the task:
### Input:
Create a string parameter
:param help_string:
:param default:
:return:
### Response:
def create_str(help_string=NO_HELP, default=NO_DEFAULT):
# type: (str, Union[str, NO_DEFAULT_TYPE]) -> str
"""
Create a string parameter
:param help_string:
:param default:
:return:
"""
# noinspection PyTypeChecker
return ParamFunctions(
help_string=help_string,
default=default,
type_name="str",
function_s2t=convert_string_to_string,
function_t2s=convert_string_to_string,
) |
def call_only_once(func):
"""
Decorate a method or property of a class, so that this method can only
be called once for every instance.
Calling it more than once will result in exception.
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
self = args[0]
# cannot use hasattr here, because hasattr tries to getattr, which
# fails if func is a property
assert func.__name__ in dir(self), "call_only_once can only be used on method or property!"
if not hasattr(self, '_CALL_ONLY_ONCE_CACHE'):
cache = self._CALL_ONLY_ONCE_CACHE = set()
else:
cache = self._CALL_ONLY_ONCE_CACHE
cls = type(self)
# cannot use ismethod(), because decorated method becomes a function
is_method = inspect.isfunction(getattr(cls, func.__name__))
assert func not in cache, \
"{} {}.{} can only be called once per object!".format(
'Method' if is_method else 'Property',
cls.__name__, func.__name__)
cache.add(func)
return func(*args, **kwargs)
return wrapper | Decorate a method or property of a class, so that this method can only
be called once for every instance.
Calling it more than once will result in exception. | Below is the the instruction that describes the task:
### Input:
Decorate a method or property of a class, so that this method can only
be called once for every instance.
Calling it more than once will result in exception.
### Response:
def call_only_once(func):
"""
Decorate a method or property of a class, so that this method can only
be called once for every instance.
Calling it more than once will result in exception.
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
self = args[0]
# cannot use hasattr here, because hasattr tries to getattr, which
# fails if func is a property
assert func.__name__ in dir(self), "call_only_once can only be used on method or property!"
if not hasattr(self, '_CALL_ONLY_ONCE_CACHE'):
cache = self._CALL_ONLY_ONCE_CACHE = set()
else:
cache = self._CALL_ONLY_ONCE_CACHE
cls = type(self)
# cannot use ismethod(), because decorated method becomes a function
is_method = inspect.isfunction(getattr(cls, func.__name__))
assert func not in cache, \
"{} {}.{} can only be called once per object!".format(
'Method' if is_method else 'Property',
cls.__name__, func.__name__)
cache.add(func)
return func(*args, **kwargs)
return wrapper |
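A short usage sketch for the decorator above (assumes call_only_once and its functools/inspect imports are in scope; Builder is a made-up class):

class Builder:
    @call_only_once
    def finalize(self):
        return "done"

b = Builder()
print(b.finalize())        # first call succeeds: "done"
try:
    b.finalize()           # second call on the same instance
except AssertionError as err:
    print("rejected:", err)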
def kernels_initialize(self, folder):
""" create a new kernel in a specified folder from template, including
json metadata that grabs values from the configuration.
Parameters
==========
folder: the path of the folder
"""
if not os.path.isdir(folder):
raise ValueError('Invalid folder: ' + folder)
resources = []
resource = {'path': 'INSERT_SCRIPT_PATH_HERE'}
resources.append(resource)
username = self.get_config_value(self.CONFIG_NAME_USER)
meta_data = {
'id': username + '/INSERT_KERNEL_SLUG_HERE',
'title': 'INSERT_TITLE_HERE',
'code_file': 'INSERT_CODE_FILE_PATH_HERE',
'language': 'INSERT_LANGUAGE_HERE',
'kernel_type': 'INSERT_KERNEL_TYPE_HERE',
'is_private': 'true',
'enable_gpu': 'false',
'enable_internet': 'false',
'dataset_sources': [],
'competition_sources': [],
'kernel_sources': [],
}
meta_file = os.path.join(folder, self.KERNEL_METADATA_FILE)
with open(meta_file, 'w') as f:
json.dump(meta_data, f, indent=2)
return meta_file | create a new kernel in a specified folder from template, including
json metadata that grabs values from the configuration.
Parameters
==========
folder: the path of the folder | Below is the the instruction that describes the task:
### Input:
create a new kernel in a specified folder from template, including
json metadata that grabs values from the configuration.
Parameters
==========
folder: the path of the folder
### Response:
def kernels_initialize(self, folder):
""" create a new kernel in a specified folder from template, including
json metadata that grabs values from the configuration.
Parameters
==========
folder: the path of the folder
"""
if not os.path.isdir(folder):
raise ValueError('Invalid folder: ' + folder)
resources = []
resource = {'path': 'INSERT_SCRIPT_PATH_HERE'}
resources.append(resource)
username = self.get_config_value(self.CONFIG_NAME_USER)
meta_data = {
'id': username + '/INSERT_KERNEL_SLUG_HERE',
'title': 'INSERT_TITLE_HERE',
'code_file': 'INSERT_CODE_FILE_PATH_HERE',
'language': 'INSERT_LANGUAGE_HERE',
'kernel_type': 'INSERT_KERNEL_TYPE_HERE',
'is_private': 'true',
'enable_gpu': 'false',
'enable_internet': 'false',
'dataset_sources': [],
'competition_sources': [],
'kernel_sources': [],
}
meta_file = os.path.join(folder, self.KERNEL_METADATA_FILE)
with open(meta_file, 'w') as f:
json.dump(meta_data, f, indent=2)
return meta_file |
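For reference, a standalone sketch that writes the same template by hand; the kernel-metadata.json filename and the "alice" username are assumptions here, not values read from a real Kaggle configuration:

import json
import os

def write_kernel_metadata(folder, username="alice"):
    meta = {
        "id": username + "/INSERT_KERNEL_SLUG_HERE",
        "title": "INSERT_TITLE_HERE",
        "code_file": "INSERT_CODE_FILE_PATH_HERE",
        "language": "INSERT_LANGUAGE_HERE",
        "kernel_type": "INSERT_KERNEL_TYPE_HERE",
        "is_private": "true",
        "enable_gpu": "false",
        "enable_internet": "false",
        "dataset_sources": [],
        "competition_sources": [],
        "kernel_sources": [],
    }
    path = os.path.join(folder, "kernel-metadata.json")
    with open(path, "w") as f:
        json.dump(meta, f, indent=2)
    return path

print(write_kernel_metadata("."))  # ./kernel-metadata.json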
def close(self):
"""The close operation loads the session if it is valid and then closes
it and releases the session seat. All the session data are deleted and
become invalid after the request is processed. The session ID can no
longer be used in subsequent requests."""
if self._SID:
self._auth.service.closeSession()
self._SID = None | The close operation loads the session if it is valid and then closes
it and releases the session seat. All the session data are deleted and
become invalid after the request is processed. The session ID can no
longer be used in subsequent requests. | Below is the the instruction that describes the task:
### Input:
The close operation loads the session if it is valid and then closes
it and releases the session seat. All the session data are deleted and
become invalid after the request is processed. The session ID can no
longer be used in subsequent requests.
### Response:
def close(self):
"""The close operation loads the session if it is valid and then closes
it and releases the session seat. All the session data are deleted and
become invalid after the request is processed. The session ID can no
longer be used in subsequent requests."""
if self._SID:
self._auth.service.closeSession()
self._SID = None |
def is_purrlog(path):
"""Checks if path refers to a valid purrlog.
Path must exist, and must contain either at least one directory called entry-YYYYMMDD-HHMMSS, or the file "dirconfig"
"""
if not os.path.isdir(path):
return False
if list(filter(os.path.isdir, glob.glob(
os.path.join(path, "entry-[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]-[0-9][0-9][0-9][0-9][0-9][0-9]")))):
return True
return os.path.exists(os.path.join(path, "dirconfig")) | Checks if path refers to a valid purrlog.
Path must exist, and must contain either at least one directory called entry-YYYYMMDD-HHMMSS, or the file "dirconfig" | Below is the the instruction that describes the task:
### Input:
Checks if path refers to a valid purrlog.
Path must exist, and must contain either at least one directory called entry-YYYYMMDD-HHMMSS, or the file "dirconfig"
### Response:
def is_purrlog(path):
"""Checks if path refers to a valid purrlog.
Path must exist, and must contain either at least one directory called entry-YYYYMMDD-HHMMSS, or the file "dirconfig"
"""
if not os.path.isdir(path):
return False
if list(filter(os.path.isdir, glob.glob(
os.path.join(path, "entry-[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]-[0-9][0-9][0-9][0-9][0-9][0-9]")))):
return True
return os.path.exists(os.path.join(path, "dirconfig")) |
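A quick self-contained check of the predicate above (assumes is_purrlog plus its os and glob imports are in scope):

import os
import tempfile

with tempfile.TemporaryDirectory() as d:
    print(is_purrlog(d))                               # False: empty directory
    open(os.path.join(d, "dirconfig"), "w").close()
    print(is_purrlog(d))                               # True: "dirconfig" exists
    os.mkdir(os.path.join(d, "entry-20240101-120000"))
    print(is_purrlog(d))                               # True: entry-YYYYMMDD-HHMMSS dir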
def retrain(self):
"""Train for a session, pulling in any new data from the filesystem"""
folder = TrainData.from_folder(self.args.folder)
train_data, test_data = folder.load(True, not self.args.no_validation)
train_data = TrainData.merge(train_data, self.sampled_data)
test_data = TrainData.merge(test_data, self.test)
train_inputs, train_outputs = train_data
print()
try:
self.listener.runner.model.fit(
train_inputs, train_outputs, self.args.batch_size, self.epoch + self.args.epochs,
validation_data=test_data, callbacks=self.callbacks, initial_epoch=self.epoch
)
finally:
self.listener.runner.model.save(self.args.model) | Train for a session, pulling in any new data from the filesystem | Below is the the instruction that describes the task:
### Input:
Train for a session, pulling in any new data from the filesystem
### Response:
def retrain(self):
"""Train for a session, pulling in any new data from the filesystem"""
folder = TrainData.from_folder(self.args.folder)
train_data, test_data = folder.load(True, not self.args.no_validation)
train_data = TrainData.merge(train_data, self.sampled_data)
test_data = TrainData.merge(test_data, self.test)
train_inputs, train_outputs = train_data
print()
try:
self.listener.runner.model.fit(
train_inputs, train_outputs, self.args.batch_size, self.epoch + self.args.epochs,
validation_data=test_data, callbacks=self.callbacks, initial_epoch=self.epoch
)
finally:
self.listener.runner.model.save(self.args.model) |
def describe_field(k, v, timestamp_parser=default_timestamp_parser):
"""Given a key representing a column name and value representing the value
stored in the column, return a representation of the BigQuery schema
element describing that field. Raise errors if invalid value types are
provided.
Parameters
----------
k : Union[str, unicode]
Key representing the column
v : Union[str, unicode, int, float, datetime, object]
Value mapped to by `k`
Returns
-------
object
Describing the field
Raises
------
Exception
If invalid value types are provided.
Examples
--------
>>> describe_field("username", "Bob")
{"name": "username", "type": "string", "mode": "nullable"}
>>> describe_field("users", [{"username": "Bob"}])
{"name": "users", "type": "record", "mode": "repeated",
"fields": [{"name":"username","type":"string","mode":"nullable"}]}
"""
def bq_schema_field(name, bq_type, mode):
return {"name": name, "type": bq_type, "mode": mode}
if isinstance(v, list):
if len(v) == 0:
raise Exception(
"Can't describe schema because of empty list {0}:[]".format(k))
v = v[0]
mode = "repeated"
else:
mode = "nullable"
bq_type = bigquery_type(v, timestamp_parser=timestamp_parser)
if not bq_type:
raise InvalidTypeException(k, v)
field = bq_schema_field(k, bq_type, mode)
if bq_type == "record":
try:
field['fields'] = schema_from_record(v, timestamp_parser)
except InvalidTypeException as e:
# recursively construct the key causing the error
raise InvalidTypeException("%s.%s" % (k, e.key), e.value)
return field | Given a key representing a column name and value representing the value
stored in the column, return a representation of the BigQuery schema
element describing that field. Raise errors if invalid value types are
provided.
Parameters
----------
k : Union[str, unicode]
Key representing the column
v : Union[str, unicode, int, float, datetime, object]
Value mapped to by `k`
Returns
-------
object
Describing the field
Raises
------
Exception
If invalid value types are provided.
Examples
--------
>>> describe_field("username", "Bob")
{"name": "username", "type": "string", "mode": "nullable"}
>>> describe_field("users", [{"username": "Bob"}])
{"name": "users", "type": "record", "mode": "repeated",
"fields": [{"name":"username","type":"string","mode":"nullable"}]} | Below is the the instruction that describes the task:
### Input:
Given a key representing a column name and value representing the value
stored in the column, return a representation of the BigQuery schema
element describing that field. Raise errors if invalid value types are
provided.
Parameters
----------
k : Union[str, unicode]
Key representing the column
v : Union[str, unicode, int, float, datetime, object]
Value mapped to by `k`
Returns
-------
object
Describing the field
Raises
------
Exception
If invalid value types are provided.
Examples
--------
>>> describe_field("username", "Bob")
{"name": "username", "type": "string", "mode": "nullable"}
>>> describe_field("users", [{"username": "Bob"}])
{"name": "users", "type": "record", "mode": "repeated",
"fields": [{"name":"username","type":"string","mode":"nullable"}]}
### Response:
def describe_field(k, v, timestamp_parser=default_timestamp_parser):
"""Given a key representing a column name and value representing the value
stored in the column, return a representation of the BigQuery schema
element describing that field. Raise errors if invalid value types are
provided.
Parameters
----------
k : Union[str, unicode]
Key representing the column
v : Union[str, unicode, int, float, datetime, object]
Value mapped to by `k`
Returns
-------
object
Describing the field
Raises
------
Exception
If invalid value types are provided.
Examples
--------
>>> describe_field("username", "Bob")
{"name": "username", "type": "string", "mode": "nullable"}
>>> describe_field("users", [{"username": "Bob"}])
{"name": "users", "type": "record", "mode": "repeated",
"fields": [{"name":"username","type":"string","mode":"nullable"}]}
"""
def bq_schema_field(name, bq_type, mode):
return {"name": name, "type": bq_type, "mode": mode}
if isinstance(v, list):
if len(v) == 0:
raise Exception(
"Can't describe schema because of empty list {0}:[]".format(k))
v = v[0]
mode = "repeated"
else:
mode = "nullable"
bq_type = bigquery_type(v, timestamp_parser=timestamp_parser)
if not bq_type:
raise InvalidTypeException(k, v)
field = bq_schema_field(k, bq_type, mode)
if bq_type == "record":
try:
field['fields'] = schema_from_record(v, timestamp_parser)
except InvalidTypeException as e:
# recursively construct the key causing the error
raise InvalidTypeException("%s.%s" % (k, e.key), e.value)
return field |
def is_prime( n ):
"""Return True if x is prime, False otherwise.
We use the Miller-Rabin test, as given in Menezes et al. p. 138.
This test is not exact: there are composite values n for which
it returns True.
In testing the odd numbers from 10000001 to 19999999,
about 66 composites got past the first test,
5 got past the second test, and none got past the third.
Since factors of 2, 3, 5, 7, and 11 were detected during
preliminary screening, the number of numbers tested by
Miller-Rabin was (19999999 - 10000001)*(2/3)*(4/5)*(6/7)
= 4.57 million.
"""
# (This is used to study the risk of false positives:)
global miller_rabin_test_count
miller_rabin_test_count = 0
if n <= smallprimes[-1]:
if n in smallprimes: return True
else: return False
if gcd( n, 2*3*5*7*11 ) != 1: return False
# Choose a number of iterations sufficient to reduce the
# probability of accepting a composite below 2**-80
# (from Menezes et al. Table 4.4):
t = 40
n_bits = 1 + int( math.log( n, 2 ) )
for k, tt in ( ( 100, 27 ),
( 150, 18 ),
( 200, 15 ),
( 250, 12 ),
( 300, 9 ),
( 350, 8 ),
( 400, 7 ),
( 450, 6 ),
( 550, 5 ),
( 650, 4 ),
( 850, 3 ),
( 1300, 2 ),
):
if n_bits < k: break
t = tt
# Run the test t times:
s = 0
r = n - 1
while ( r % 2 ) == 0:
s = s + 1
r = r // 2
for i in range( t ):
a = smallprimes[ i ]
y = modular_exp( a, r, n )
if y != 1 and y != n-1:
j = 1
while j <= s - 1 and y != n - 1:
y = modular_exp( y, 2, n )
if y == 1:
miller_rabin_test_count = i + 1
return False
j = j + 1
if y != n-1:
miller_rabin_test_count = i + 1
return False
return True | Return True if x is prime, False otherwise.
We use the Miller-Rabin test, as given in Menezes et al. p. 138.
This test is not exact: there are composite values n for which
it returns True.
In testing the odd numbers from 10000001 to 19999999,
about 66 composites got past the first test,
5 got past the second test, and none got past the third.
Since factors of 2, 3, 5, 7, and 11 were detected during
preliminary screening, the number of numbers tested by
Miller-Rabin was (19999999 - 10000001)*(2/3)*(4/5)*(6/7)
= 4.57 million. | Below is the the instruction that describes the task:
### Input:
Return True if x is prime, False otherwise.
We use the Miller-Rabin test, as given in Menezes et al. p. 138.
This test is not exact: there are composite values n for which
it returns True.
In testing the odd numbers from 10000001 to 19999999,
about 66 composites got past the first test,
5 got past the second test, and none got past the third.
Since factors of 2, 3, 5, 7, and 11 were detected during
preliminary screening, the number of numbers tested by
Miller-Rabin was (19999999 - 10000001)*(2/3)*(4/5)*(6/7)
= 4.57 million.
### Response:
def is_prime( n ):
"""Return True if x is prime, False otherwise.
We use the Miller-Rabin test, as given in Menezes et al. p. 138.
This test is not exact: there are composite values n for which
it returns True.
In testing the odd numbers from 10000001 to 19999999,
about 66 composites got past the first test,
5 got past the second test, and none got past the third.
Since factors of 2, 3, 5, 7, and 11 were detected during
preliminary screening, the number of numbers tested by
Miller-Rabin was (19999999 - 10000001)*(2/3)*(4/5)*(6/7)
= 4.57 million.
"""
# (This is used to study the risk of false positives:)
global miller_rabin_test_count
miller_rabin_test_count = 0
if n <= smallprimes[-1]:
if n in smallprimes: return True
else: return False
if gcd( n, 2*3*5*7*11 ) != 1: return False
# Choose a number of iterations sufficient to reduce the
# probability of accepting a composite below 2**-80
# (from Menezes et al. Table 4.4):
t = 40
n_bits = 1 + int( math.log( n, 2 ) )
for k, tt in ( ( 100, 27 ),
( 150, 18 ),
( 200, 15 ),
( 250, 12 ),
( 300, 9 ),
( 350, 8 ),
( 400, 7 ),
( 450, 6 ),
( 550, 5 ),
( 650, 4 ),
( 850, 3 ),
( 1300, 2 ),
):
if n_bits < k: break
t = tt
# Run the test t times:
s = 0
r = n - 1
while ( r % 2 ) == 0:
s = s + 1
r = r // 2
for i in range( t ):
a = smallprimes[ i ]
y = modular_exp( a, r, n )
if y != 1 and y != n-1:
j = 1
while j <= s - 1 and y != n - 1:
y = modular_exp( y, 2, n )
if y == 1:
miller_rabin_test_count = i + 1
return False
j = j + 1
if y != n-1:
miller_rabin_test_count = i + 1
return False
return True |
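A usage sketch (assumes is_prime and the module globals it relies on, smallprimes and modular_exp, are in scope):

print([n for n in range(2, 30) if is_prime(n)])
# [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
print(is_prime(104729))   # True: the 10,000th prime, large enough to take the Miller-Rabin path
print(is_prime(104730))   # False: even, rejected by the gcd pre-screen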
def view_all_work_queues():
"""Page for viewing the index of all active work queues."""
count_list = list(
db.session.query(
work_queue.WorkQueue.queue_name,
work_queue.WorkQueue.status,
func.count(work_queue.WorkQueue.task_id))
.group_by(work_queue.WorkQueue.queue_name,
work_queue.WorkQueue.status))
queue_dict = {}
for name, status, count in count_list:
queue_dict[(name, status)] = dict(
name=name, status=status, count=count)
max_created_list = list(
db.session.query(
work_queue.WorkQueue.queue_name,
work_queue.WorkQueue.status,
func.max(work_queue.WorkQueue.created))
.group_by(work_queue.WorkQueue.queue_name,
work_queue.WorkQueue.status))
for name, status, newest_created in max_created_list:
queue_dict[(name, status)]['newest_created'] = newest_created
min_eta_list = list(
db.session.query(
work_queue.WorkQueue.queue_name,
work_queue.WorkQueue.status,
func.min(work_queue.WorkQueue.eta))
.group_by(work_queue.WorkQueue.queue_name,
work_queue.WorkQueue.status))
for name, status, oldest_eta in min_eta_list:
queue_dict[(name, status)]['oldest_eta'] = oldest_eta
queue_list = list(queue_dict.values())
queue_list.sort(key=lambda x: (x['name'], x['status']))
context = dict(
queue_list=queue_list,
)
return render_template('view_work_queue_index.html', **context) | Page for viewing the index of all active work queues. | Below is the the instruction that describes the task:
### Input:
Page for viewing the index of all active work queues.
### Response:
def view_all_work_queues():
"""Page for viewing the index of all active work queues."""
count_list = list(
db.session.query(
work_queue.WorkQueue.queue_name,
work_queue.WorkQueue.status,
func.count(work_queue.WorkQueue.task_id))
.group_by(work_queue.WorkQueue.queue_name,
work_queue.WorkQueue.status))
queue_dict = {}
for name, status, count in count_list:
queue_dict[(name, status)] = dict(
name=name, status=status, count=count)
max_created_list = list(
db.session.query(
work_queue.WorkQueue.queue_name,
work_queue.WorkQueue.status,
func.max(work_queue.WorkQueue.created))
.group_by(work_queue.WorkQueue.queue_name,
work_queue.WorkQueue.status))
for name, status, newest_created in max_created_list:
queue_dict[(name, status)]['newest_created'] = newest_created
min_eta_list = list(
db.session.query(
work_queue.WorkQueue.queue_name,
work_queue.WorkQueue.status,
func.min(work_queue.WorkQueue.eta))
.group_by(work_queue.WorkQueue.queue_name,
work_queue.WorkQueue.status))
for name, status, oldest_eta in min_eta_list:
queue_dict[(name, status)]['oldest_eta'] = oldest_eta
queue_list = list(queue_dict.values())
queue_list.sort(key=lambda x: (x['name'], x['status']))
context = dict(
queue_list=queue_list,
)
return render_template('view_work_queue_index.html', **context) |
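The interesting part is the merge of three GROUP BY results keyed on (queue_name, status); a standalone sketch of that merge with made-up rows:

count_list = [("fetch", "live", 3), ("fetch", "error", 1)]
max_created_list = [("fetch", "live", "2024-01-02"), ("fetch", "error", "2024-01-01")]
min_eta_list = [("fetch", "live", "2024-01-03"), ("fetch", "error", "2024-01-04")]

queue_dict = {(n, s): dict(name=n, status=s, count=c) for n, s, c in count_list}
for n, s, newest_created in max_created_list:
    queue_dict[(n, s)]["newest_created"] = newest_created
for n, s, oldest_eta in min_eta_list:
    queue_dict[(n, s)]["oldest_eta"] = oldest_eta

queue_list = sorted(queue_dict.values(), key=lambda x: (x["name"], x["status"]))
print(queue_list)   # one merged dict per (queue_name, status) pair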
def _set_rho_grids(self):
""" Set the grids and weights for rho used in numerical integration
of AR(1) parameters.
"""
rho_grids = np.arange(self.rho_bins) * 2 / self.rho_bins - 1 \
+ 1 / self.rho_bins
rho_weights = np.ones(self.rho_bins) / self.rho_bins
return rho_grids, rho_weights | Set the grids and weights for rho used in numerical integration
of AR(1) parameters. | Below is the the instruction that describes the task:
### Input:
Set the grids and weights for rho used in numerical integration
of AR(1) parameters.
### Response:
def _set_rho_grids(self):
""" Set the grids and weights for rho used in numerical integration
of AR(1) parameters.
"""
rho_grids = np.arange(self.rho_bins) * 2 / self.rho_bins - 1 \
+ 1 / self.rho_bins
rho_weights = np.ones(self.rho_bins) / self.rho_bins
return rho_grids, rho_weights |
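The formula places the grid points at the midpoints of rho_bins equal-width cells covering (-1, 1), each with equal weight; a standalone check with rho_bins = 4:

import numpy as np

rho_bins = 4
rho_grids = np.arange(rho_bins) * 2 / rho_bins - 1 + 1 / rho_bins
rho_weights = np.ones(rho_bins) / rho_bins
print(rho_grids)     # [-0.75 -0.25  0.25  0.75]
print(rho_weights)   # [0.25 0.25 0.25 0.25]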
def _maybe_download_corpora(tmp_dir):
"""Download corpora for multinli.
Args:
tmp_dir: a string
Returns:
a string
"""
mnli_filename = "MNLI.zip"
mnli_finalpath = os.path.join(tmp_dir, "MNLI")
if not tf.gfile.Exists(mnli_finalpath):
zip_filepath = generator_utils.maybe_download(
tmp_dir, mnli_filename, _MNLI_URL)
zip_ref = zipfile.ZipFile(zip_filepath, "r")
zip_ref.extractall(tmp_dir)
zip_ref.close()
return mnli_finalpath | Download corpora for multinli.
Args:
tmp_dir: a string
Returns:
a string | Below is the the instruction that describes the task:
### Input:
Download corpora for multinli.
Args:
tmp_dir: a string
Returns:
a string
### Response:
def _maybe_download_corpora(tmp_dir):
"""Download corpora for multinli.
Args:
tmp_dir: a string
Returns:
a string
"""
mnli_filename = "MNLI.zip"
mnli_finalpath = os.path.join(tmp_dir, "MNLI")
if not tf.gfile.Exists(mnli_finalpath):
zip_filepath = generator_utils.maybe_download(
tmp_dir, mnli_filename, _MNLI_URL)
zip_ref = zipfile.ZipFile(zip_filepath, "r")
zip_ref.extractall(tmp_dir)
zip_ref.close()
return mnli_finalpath |
def get_grade_entry_form_for_update(self, grade_entry_id):
"""Gets the grade entry form for updating an existing entry.
A new grade entry form should be requested for each update
transaction.
arg: grade_entry_id (osid.id.Id): the ``Id`` of the
``GradeEntry``
return: (osid.grading.GradeEntryForm) - the grade entry form
raise: NotFound - ``grade_entry_id`` is not found
raise: NullArgument - ``grade_entry_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
collection = JSONClientValidated('grading',
collection='GradeEntry',
runtime=self._runtime)
if not isinstance(grade_entry_id, ABCId):
raise errors.InvalidArgument('the argument is not a valid OSID Id')
if (grade_entry_id.get_identifier_namespace() != 'grading.GradeEntry' or
grade_entry_id.get_authority() != self._authority):
raise errors.InvalidArgument()
result = collection.find_one({'_id': ObjectId(grade_entry_id.get_identifier())})
obj_form = objects.GradeEntryForm(
osid_object_map=result,
effective_agent_id=str(self.get_effective_agent_id()),
runtime=self._runtime,
proxy=self._proxy)
self._forms[obj_form.get_id().get_identifier()] = not UPDATED
return obj_form | Gets the grade entry form for updating an existing entry.
A new grade entry form should be requested for each update
transaction.
arg: grade_entry_id (osid.id.Id): the ``Id`` of the
``GradeEntry``
return: (osid.grading.GradeEntryForm) - the grade entry form
raise: NotFound - ``grade_entry_id`` is not found
raise: NullArgument - ``grade_entry_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* | Below is the the instruction that describes the task:
### Input:
Gets the grade entry form for updating an existing entry.
A new grade entry form should be requested for each update
transaction.
arg: grade_entry_id (osid.id.Id): the ``Id`` of the
``GradeEntry``
return: (osid.grading.GradeEntryForm) - the grade entry form
raise: NotFound - ``grade_entry_id`` is not found
raise: NullArgument - ``grade_entry_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
### Response:
def get_grade_entry_form_for_update(self, grade_entry_id):
"""Gets the grade entry form for updating an existing entry.
A new grade entry form should be requested for each update
transaction.
arg: grade_entry_id (osid.id.Id): the ``Id`` of the
``GradeEntry``
return: (osid.grading.GradeEntryForm) - the grade entry form
raise: NotFound - ``grade_entry_id`` is not found
raise: NullArgument - ``grade_entry_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
collection = JSONClientValidated('grading',
collection='GradeEntry',
runtime=self._runtime)
if not isinstance(grade_entry_id, ABCId):
raise errors.InvalidArgument('the argument is not a valid OSID Id')
if (grade_entry_id.get_identifier_namespace() != 'grading.GradeEntry' or
grade_entry_id.get_authority() != self._authority):
raise errors.InvalidArgument()
result = collection.find_one({'_id': ObjectId(grade_entry_id.get_identifier())})
obj_form = objects.GradeEntryForm(
osid_object_map=result,
effective_agent_id=str(self.get_effective_agent_id()),
runtime=self._runtime,
proxy=self._proxy)
self._forms[obj_form.get_id().get_identifier()] = not UPDATED
return obj_form |
def perc(arr, p=95, **kwargs):
"""Create symmetric percentiles, with ``p`` coverage."""
offset = (100 - p) / 2
return np.percentile(arr, (offset, 100 - offset), **kwargs) | Create symmetric percentiles, with ``p`` coverage. | Below is the the instruction that describes the task:
### Input:
Create symmetric percentiles, with ``p`` coverage.
### Response:
def perc(arr, p=95, **kwargs):
"""Create symmetric percentiles, with ``p`` coverage."""
offset = (100 - p) / 2
return np.percentile(arr, (offset, 100 - offset), **kwargs) |
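A worked check (assumes perc and numpy are in scope): with p=95 the offsets are 2.5 and 97.5, so the call returns the 2.5th and 97.5th percentiles.

import numpy as np

arr = np.arange(1000)
lo, hi = perc(arr, p=95)
print(lo, hi)        # ~24.975 and ~974.025 with the default interpolation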
def close(self):
"""close(self)"""
if self.isClosed:
raise ValueError("operation illegal for closed doc")
if hasattr(self, '_outline') and self._outline:
self._dropOutline(self._outline)
self._outline = None
self._reset_page_refs()
self.metadata = None
self.stream = None
self.isClosed = True
self.openErrCode = 0
self.openErrMsg = ''
self.FontInfos = []
for gmap in self.Graftmaps:
self.Graftmaps[gmap] = None
self.Graftmaps = {}
self.ShownPages = {}
val = _fitz.Document_close(self)
self.thisown = False
return val | close(self) | Below is the the instruction that describes the task:
### Input:
close(self)
### Response:
def close(self):
"""close(self)"""
if self.isClosed:
raise ValueError("operation illegal for closed doc")
if hasattr(self, '_outline') and self._outline:
self._dropOutline(self._outline)
self._outline = None
self._reset_page_refs()
self.metadata = None
self.stream = None
self.isClosed = True
self.openErrCode = 0
self.openErrMsg = ''
self.FontInfos = []
for gmap in self.Graftmaps:
self.Graftmaps[gmap] = None
self.Graftmaps = {}
self.ShownPages = {}
val = _fitz.Document_close(self)
self.thisown = False
return val |
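Typical lifecycle around this method in PyMuPDF (a hedged sketch; "example.pdf" is a placeholder, and newer releases expose snake_case aliases for these camelCase attributes):

import fitz  # PyMuPDF

doc = fitz.open("example.pdf")
print(doc.pageCount)
doc.close()              # drops outlines, graftmaps and cached page refs
print(doc.isClosed)      # True; further page access now raises ValueError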
def create_cache(directory, compress_level=6, value_type_is_binary=False, **kwargs):
"""
Create a html cache. Html string will be automatically compressed.
:param directory: path for the cache directory.
:param compress_level: 0 ~ 9, 9 is slowest and smallest.
:param kwargs: other arguments.
:return: a `diskcache.Cache()`
"""
cache = diskcache.Cache(
directory,
disk=CompressedDisk,
disk_compress_level=compress_level,
disk_value_type_is_binary=value_type_is_binary,
**kwargs
)
return cache | Create a html cache. Html string will be automatically compressed.
:param directory: path for the cache directory.
:param compress_level: 0 ~ 9, 9 is slowest and smallest.
:param kwargs: other arguments.
:return: a `diskcache.Cache()` | Below is the the instruction that describes the task:
### Input:
Create a html cache. Html string will be automatically compressed.
:param directory: path for the cache directory.
:param compress_level: 0 ~ 9, 9 is slowest and smallest.
:param kwargs: other arguments.
:return: a `diskcache.Cache()`
### Response:
def create_cache(directory, compress_level=6, value_type_is_binary=False, **kwargs):
"""
Create a html cache. Html string will be automatically compressed.
:param directory: path for the cache directory.
:param compress_level: 0 ~ 9, 9 is slowest and smallest.
:param kwargs: other arguments.
:return: a `diskcache.Cache()`
"""
cache = diskcache.Cache(
directory,
disk=CompressedDisk,
disk_compress_level=compress_level,
disk_value_type_is_binary=value_type_is_binary,
**kwargs
)
return cache |
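A usage sketch (assumes create_cache and its CompressedDisk helper are importable from the same module; the directory and URL are placeholders):

cache = create_cache("/tmp/html_cache", compress_level=9)
cache["https://example.com"] = "<html><body>hello</body></html>"   # compressed on write
print(cache["https://example.com"][:12])                           # decompressed on read
cache.close()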
def plotnoise(noisepkl, mergepkl, plot_width=950, plot_height=400):
""" Make two panel plot to summary noise analysis with estimated flux scale """
d = pickle.load(open(mergepkl))
ndist, imstd, flagfrac = plotnoisedist(noisepkl, plot_width=plot_width/2, plot_height=plot_height)
fluxscale = calcfluxscale(d, imstd, flagfrac)
logger.info('Median image noise is {0:.3} Jy.'.format(fluxscale*imstd))
ncum, imnoise = plotnoisecum(noisepkl, fluxscale=fluxscale, plot_width=plot_width/2, plot_height=plot_height)
hndle = show(Row(ndist, ncum, width=plot_width, height=plot_height))
return imnoise | Make two panel plot to summary noise analysis with estimated flux scale | Below is the the instruction that describes the task:
### Input:
Make two panel plot to summary noise analysis with estimated flux scale
### Response:
def plotnoise(noisepkl, mergepkl, plot_width=950, plot_height=400):
""" Make two panel plot to summary noise analysis with estimated flux scale """
d = pickle.load(open(mergepkl))
ndist, imstd, flagfrac = plotnoisedist(noisepkl, plot_width=plot_width/2, plot_height=plot_height)
fluxscale = calcfluxscale(d, imstd, flagfrac)
logger.info('Median image noise is {0:.3} Jy.'.format(fluxscale*imstd))
ncum, imnoise = plotnoisecum(noisepkl, fluxscale=fluxscale, plot_width=plot_width/2, plot_height=plot_height)
hndle = show(Row(ndist, ncum, width=plot_width, height=plot_height))
return imnoise |
def readTable(self, tableName):
"""
Read the table corresponding to the specified name, equivalent to the
AMPL statement:
.. code-block:: ampl
read table tableName;
Args:
tableName: Name of the table to be read.
"""
lock_and_call(
lambda: self._impl.readTable(tableName),
self._lock
) | Read the table corresponding to the specified name, equivalent to the
AMPL statement:
.. code-block:: ampl
read table tableName;
Args:
tableName: Name of the table to be read. | Below is the the instruction that describes the task:
### Input:
Read the table corresponding to the specified name, equivalent to the
AMPL statement:
.. code-block:: ampl
read table tableName;
Args:
tableName: Name of the table to be read.
### Response:
def readTable(self, tableName):
"""
Read the table corresponding to the specified name, equivalent to the
AMPL statement:
.. code-block:: ampl
read table tableName;
Args:
tableName: Name of the table to be read.
"""
lock_and_call(
lambda: self._impl.readTable(tableName),
self._lock
) |
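A usage sketch with amplpy (hedged: the model and table names are placeholders, and the model file is assumed to declare the table):

from amplpy import AMPL

ampl = AMPL()
ampl.read("diet.mod")      # model containing e.g.: table foods IN "foods.tab": ...;
ampl.readTable("foods")    # same effect as the AMPL statement: read table foods;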
def batching_scheme(batch_size,
max_length,
min_length_bucket,
length_bucket_step,
drop_long_sequences=False,
shard_multiplier=1,
length_multiplier=1,
min_length=0):
"""A batching scheme based on model hyperparameters.
Every batch contains a number of sequences divisible by `shard_multiplier`.
Args:
batch_size: int, total number of tokens in a batch.
max_length: int, sequences longer than this will be skipped. Defaults to
batch_size.
min_length_bucket: int
length_bucket_step: float greater than 1.0
drop_long_sequences: bool, if True, then sequences longer than
`max_length` are dropped. This prevents generating batches with
more than the usual number of tokens, which can cause out-of-memory
errors.
shard_multiplier: an integer increasing the batch_size to suit splitting
across datashards.
length_multiplier: an integer multiplier that is used to increase the
batch sizes and sequence length tolerance.
min_length: int, sequences shorter than this will be skipped.
Returns:
A dictionary with parameters that can be passed to input_pipeline:
* boundaries: list of bucket boundaries
* batch_sizes: list of batch sizes for each length bucket
* max_length: int, maximum length of an example
Raises:
ValueError: If min_length > max_length
"""
max_length = max_length or batch_size
if max_length < min_length:
raise ValueError("max_length must be greater or equal to min_length")
boundaries = _bucket_boundaries(max_length, min_length_bucket,
length_bucket_step)
boundaries = [boundary * length_multiplier for boundary in boundaries]
max_length *= length_multiplier
batch_sizes = [
max(1, batch_size // length) for length in boundaries + [max_length]
]
max_batch_size = max(batch_sizes)
# Since the Datasets API only allows a single constant for window_size,
# and it needs divide all bucket_batch_sizes, we pick a highly-composite
# window size and then round down all batch sizes to divisors of that window
# size, so that a window can always be divided evenly into batches.
# TODO(noam): remove this when Dataset API improves.
highly_composite_numbers = [
1, 2, 4, 6, 12, 24, 36, 48, 60, 120, 180, 240, 360, 720, 840, 1260, 1680,
2520, 5040, 7560, 10080, 15120, 20160, 25200, 27720, 45360, 50400, 55440,
83160, 110880, 166320, 221760, 277200, 332640, 498960, 554400, 665280,
720720, 1081080, 1441440, 2162160, 2882880, 3603600, 4324320, 6486480,
7207200, 8648640, 10810800, 14414400, 17297280, 21621600, 32432400,
36756720, 43243200, 61261200, 73513440, 110270160
]
window_size = max(
[i for i in highly_composite_numbers if i <= 3 * max_batch_size])
divisors = [i for i in range(1, window_size + 1) if window_size % i == 0]
batch_sizes = [max([d for d in divisors if d <= bs]) for bs in batch_sizes]
window_size *= shard_multiplier
batch_sizes = [bs * shard_multiplier for bs in batch_sizes]
# The Datasets API splits one window into multiple batches, which
# produces runs of many consecutive batches of the same size. This
# is bad for training. To solve this, we will shuffle the batches
# using a queue which must be several times as large as the maximum
# number of batches per window.
max_batches_per_window = window_size // min(batch_sizes)
shuffle_queue_size = max_batches_per_window * 3
ret = {
"boundaries": boundaries,
"batch_sizes": batch_sizes,
"min_length": min_length,
"max_length": (max_length if drop_long_sequences else 10**9),
"shuffle_queue_size": shuffle_queue_size,
}
return ret | A batching scheme based on model hyperparameters.
Every batch contains a number of sequences divisible by `shard_multiplier`.
Args:
batch_size: int, total number of tokens in a batch.
max_length: int, sequences longer than this will be skipped. Defaults to
batch_size.
min_length_bucket: int
length_bucket_step: float greater than 1.0
drop_long_sequences: bool, if True, then sequences longer than
`max_length` are dropped. This prevents generating batches with
more than the usual number of tokens, which can cause out-of-memory
errors.
shard_multiplier: an integer increasing the batch_size to suit splitting
across datashards.
length_multiplier: an integer multiplier that is used to increase the
batch sizes and sequence length tolerance.
min_length: int, sequences shorter than this will be skipped.
Returns:
A dictionary with parameters that can be passed to input_pipeline:
* boundaries: list of bucket boundaries
* batch_sizes: list of batch sizes for each length bucket
* max_length: int, maximum length of an example
Raises:
ValueError: If min_length > max_length | Below is the the instruction that describes the task:
### Input:
A batching scheme based on model hyperparameters.
Every batch contains a number of sequences divisible by `shard_multiplier`.
Args:
batch_size: int, total number of tokens in a batch.
max_length: int, sequences longer than this will be skipped. Defaults to
batch_size.
min_length_bucket: int
length_bucket_step: float greater than 1.0
drop_long_sequences: bool, if True, then sequences longer than
`max_length` are dropped. This prevents generating batches with
more than the usual number of tokens, which can cause out-of-memory
errors.
shard_multiplier: an integer increasing the batch_size to suit splitting
across datashards.
length_multiplier: an integer multiplier that is used to increase the
batch sizes and sequence length tolerance.
min_length: int, sequences shorter than this will be skipped.
Returns:
A dictionary with parameters that can be passed to input_pipeline:
* boundaries: list of bucket boundaries
* batch_sizes: list of batch sizes for each length bucket
* max_length: int, maximum length of an example
Raises:
ValueError: If min_length > max_length
### Response:
def batching_scheme(batch_size,
max_length,
min_length_bucket,
length_bucket_step,
drop_long_sequences=False,
shard_multiplier=1,
length_multiplier=1,
min_length=0):
"""A batching scheme based on model hyperparameters.
Every batch contains a number of sequences divisible by `shard_multiplier`.
Args:
batch_size: int, total number of tokens in a batch.
max_length: int, sequences longer than this will be skipped. Defaults to
batch_size.
min_length_bucket: int
length_bucket_step: float greater than 1.0
drop_long_sequences: bool, if True, then sequences longer than
`max_length` are dropped. This prevents generating batches with
more than the usual number of tokens, which can cause out-of-memory
errors.
shard_multiplier: an integer increasing the batch_size to suit splitting
across datashards.
length_multiplier: an integer multiplier that is used to increase the
batch sizes and sequence length tolerance.
min_length: int, sequences shorter than this will be skipped.
Returns:
A dictionary with parameters that can be passed to input_pipeline:
* boundaries: list of bucket boundaries
* batch_sizes: list of batch sizes for each length bucket
* max_length: int, maximum length of an example
Raises:
ValueError: If min_length > max_length
"""
max_length = max_length or batch_size
if max_length < min_length:
raise ValueError("max_length must be greater or equal to min_length")
boundaries = _bucket_boundaries(max_length, min_length_bucket,
length_bucket_step)
boundaries = [boundary * length_multiplier for boundary in boundaries]
max_length *= length_multiplier
batch_sizes = [
max(1, batch_size // length) for length in boundaries + [max_length]
]
max_batch_size = max(batch_sizes)
# Since the Datasets API only allows a single constant for window_size,
# and it needs divide all bucket_batch_sizes, we pick a highly-composite
# window size and then round down all batch sizes to divisors of that window
# size, so that a window can always be divided evenly into batches.
# TODO(noam): remove this when Dataset API improves.
highly_composite_numbers = [
1, 2, 4, 6, 12, 24, 36, 48, 60, 120, 180, 240, 360, 720, 840, 1260, 1680,
2520, 5040, 7560, 10080, 15120, 20160, 25200, 27720, 45360, 50400, 55440,
83160, 110880, 166320, 221760, 277200, 332640, 498960, 554400, 665280,
720720, 1081080, 1441440, 2162160, 2882880, 3603600, 4324320, 6486480,
7207200, 8648640, 10810800, 14414400, 17297280, 21621600, 32432400,
36756720, 43243200, 61261200, 73513440, 110270160
]
window_size = max(
[i for i in highly_composite_numbers if i <= 3 * max_batch_size])
divisors = [i for i in range(1, window_size + 1) if window_size % i == 0]
batch_sizes = [max([d for d in divisors if d <= bs]) for bs in batch_sizes]
window_size *= shard_multiplier
batch_sizes = [bs * shard_multiplier for bs in batch_sizes]
# The Datasets API splits one window into multiple batches, which
# produces runs of many consecutive batches of the same size. This
# is bad for training. To solve this, we will shuffle the batches
# using a queue which must be several times as large as the maximum
# number of batches per window.
max_batches_per_window = window_size // min(batch_sizes)
shuffle_queue_size = max_batches_per_window * 3
ret = {
"boundaries": boundaries,
"batch_sizes": batch_sizes,
"min_length": min_length,
"max_length": (max_length if drop_long_sequences else 10**9),
"shuffle_queue_size": shuffle_queue_size,
}
return ret |
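A standalone illustration of the window-rounding step above: pick the largest highly composite window no bigger than 3 * max_batch_size, then round every bucket batch size down to a divisor of that window (the bucket batch sizes here are made up):

highly_composite = [1, 2, 4, 6, 12, 24, 36, 48, 60, 120, 180, 240, 360,
                    720, 840, 1260, 1680, 2520]
batch_sizes = [512, 341, 256, 170, 128]        # e.g. 4096 tokens // bucket length
max_batch_size = max(batch_sizes)
window_size = max(i for i in highly_composite if i <= 3 * max_batch_size)
divisors = [i for i in range(1, window_size + 1) if window_size % i == 0]
rounded = [max(d for d in divisors if d <= bs) for bs in batch_sizes]
print(window_size, rounded)   # 1260 [420, 315, 252, 140, 126]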
def fixpath(path):
"""Uniformly format a path."""
return os.path.normpath(os.path.realpath(os.path.expanduser(path))) | Uniformly format a path. | Below is the the instruction that describes the task:
### Input:
Uniformly format a path.
### Response:
def fixpath(path):
"""Uniformly format a path."""
return os.path.normpath(os.path.realpath(os.path.expanduser(path))) |
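In effect this collapses "~", ".", ".." and redundant separators and resolves symlinks into one absolute path (assumes fixpath is in scope):

import os

p = fixpath("~/projects/../projects/./demo")
print(p)                   # absolute, symlink-free path, e.g. /home/<user>/projects/demo
print(os.path.isabs(p))    # True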
def _create_cpe_parts(self, system, components):
"""
Create the structure to store the input type of system associated
with components of CPE Name (hardware, operating system and software).
:param string system: type of system associated with CPE Name
:param dict components: CPE Name components to store
:returns: None
:exception: KeyError - incorrect system
"""
if system not in CPEComponent.SYSTEM_VALUES:
errmsg = "Key '{0}' is not exist".format(system)
raise ValueError(errmsg)
elements = []
elements.append(components)
pk = CPE._system_and_parts[system]
self[pk] = elements | Create the structure to store the input type of system associated
with components of CPE Name (hardware, operating system and software).
:param string system: type of system associated with CPE Name
:param dict components: CPE Name components to store
:returns: None
:exception: KeyError - incorrect system | Below is the the instruction that describes the task:
### Input:
Create the structure to store the input type of system associated
with components of CPE Name (hardware, operating system and software).
:param string system: type of system associated with CPE Name
:param dict components: CPE Name components to store
:returns: None
:exception: KeyError - incorrect system
### Response:
def _create_cpe_parts(self, system, components):
"""
Create the structure to store the input type of system associated
with components of CPE Name (hardware, operating system and software).
:param string system: type of system associated with CPE Name
:param dict components: CPE Name components to store
:returns: None
:exception: KeyError - incorrect system
"""
if system not in CPEComponent.SYSTEM_VALUES:
errmsg = "Key '{0}' is not exist".format(system)
raise ValueError(errmsg)
elements = []
elements.append(components)
pk = CPE._system_and_parts[system]
self[pk] = elements |
def equal(self, value_a, value_b): #pylint: disable=no-self-use
"""Check if two valid Property values are equal
.. note::
This method assumes that :code:`None` and
:code:`properties.undefined` are never passed in as values
"""
equal = value_a == value_b
if hasattr(equal, '__iter__'):
return all(equal)
return equal | Check if two valid Property values are equal
.. note::
This method assumes that :code:`None` and
:code:`properties.undefined` are never passed in as values | Below is the the instruction that describes the task:
### Input:
Check if two valid Property values are equal
.. note::
This method assumes that :code:`None` and
:code:`properties.undefined` are never passed in as values
### Response:
def equal(self, value_a, value_b): #pylint: disable=no-self-use
"""Check if two valid Property values are equal
.. note::
This method assumes that :code:`None` and
:code:`properties.undefined` are never passed in as values
"""
equal = value_a == value_b
if hasattr(equal, '__iter__'):
return all(equal)
return equal |
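The hasattr(..., '__iter__') branch is what makes this work for array-valued properties, where == is element-wise; a standalone illustration of the same comparison rule:

import numpy as np

def values_equal(a, b):
    eq = a == b
    return all(eq) if hasattr(eq, "__iter__") else eq

print(values_equal(3, 3))                                 # True (scalar compare)
print(values_equal(np.array([1, 2]), np.array([1, 2])))   # True (all elements equal)
print(values_equal(np.array([1, 2]), np.array([1, 3])))   # False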
def add_entry(self, row):
"""This will parse the VCF entry and also store it within the VCFFile. It will also
return the VCFEntry as well.
"""
var_call = VCFEntry(self.individuals)
var_call.parse_entry( row )
self.entries[(var_call.chrom, var_call.pos)] = var_call
return var_call | This will parse the VCF entry and also store it within the VCFFile. It will also
return the VCFEntry as well. | Below is the the instruction that describes the task:
### Input:
This will parse the VCF entry and also store it within the VCFFile. It will also
return the VCFEntry as well.
### Response:
def add_entry(self, row):
"""This will parse the VCF entry and also store it within the VCFFile. It will also
return the VCFEntry as well.
"""
var_call = VCFEntry(self.individuals)
var_call.parse_entry( row )
self.entries[(var_call.chrom, var_call.pos)] = var_call
return var_call |
def queries(self, rcSuffix='', rcNeeded=False, padChar='-',
queryInsertionChar='N', unknownQualityChar='!',
allowDuplicateIds=False, addAlignment=False):
"""
Produce padded (with gaps) queries according to the CIGAR string and
reference sequence length for each matching query sequence.
@param rcSuffix: A C{str} to add to the end of query names that are
reverse complemented. This is added before the /1, /2, etc., that
are added for duplicated ids (if there are duplicates and
C{allowDuplicateIds} is C{False}.
@param rcNeeded: If C{True}, queries that are flagged as matching when
reverse complemented should have reverse complementing when
preparing the output sequences. This must be used if the program
that created the SAM/BAM input flags reversed matches but does not
also store the reverse complemented query.
@param padChar: A C{str} of length one to use to pad queries with to
make them the same length as the reference sequence.
@param queryInsertionChar: A C{str} of length one to use to insert
into queries when the CIGAR string indicates that the alignment
of a query would cause a deletion in the reference. This character
is inserted as a 'missing' query character (i.e., a base that can
be assumed to have been lost due to an error) whose existence is
necessary for the match to continue.
@param unknownQualityChar: The character to put into the quality
string when unknown bases are inserted in the query or the query
is padded on the left/right with gaps.
@param allowDuplicateIds: If C{True}, repeated query ids (due to
secondary or supplemental matches) will not have /1, /2, etc.
appended to their ids. So repeated ids may appear in the yielded
FASTA.
@param addAlignment: If C{True} the reads yielded by the returned
generator will also have an C{alignment} attribute, being the
C{pysam.AlignedSegment} for the query.
@raises InvalidSAM: If a query has an empty SEQ field and either there
is no previous alignment or the alignment is not marked as
secondary or supplementary.
@return: A generator that yields C{Read} instances that are padded
with gap characters to align them to the length of the reference
sequence. See C{addAlignment}, above, to yield reads with the
corresponding C{pysam.AlignedSegment}.
"""
referenceLength = self.referenceLength
# Hold the count for each id so we can add /1, /2 etc to duplicate
# ids (unless --allowDuplicateIds was given).
idCount = Counter()
MATCH_OPERATIONS = {CMATCH, CEQUAL, CDIFF}
for lineNumber, alignment in enumerate(
self.samFilter.alignments(), start=1):
query = alignment.query_sequence
quality = ''.join(chr(q + 33) for q in alignment.query_qualities)
if alignment.is_reverse:
if rcNeeded:
query = DNARead('id', query).reverseComplement().sequence
quality = quality[::-1]
if rcSuffix:
alignment.query_name += rcSuffix
# Adjust the query id if it's a duplicate and we're not allowing
# duplicates.
if allowDuplicateIds:
queryId = alignment.query_name
else:
count = idCount[alignment.query_name]
idCount[alignment.query_name] += 1
queryId = alignment.query_name + (
'' if count == 0 else '/%d' % count)
referenceStart = alignment.reference_start
atStart = True
queryIndex = 0
referenceIndex = referenceStart
alignedSequence = ''
alignedQuality = ''
for operation, length in alignment.cigartuples:
# The operations are tested in the order they appear in
# https://samtools.github.io/hts-specs/SAMv1.pdf It would be
# more efficient to test them in order of frequency of
# occurrence.
if operation in MATCH_OPERATIONS:
atStart = False
alignedSequence += query[queryIndex:queryIndex + length]
alignedQuality += quality[queryIndex:queryIndex + length]
elif operation == CINS:
# Insertion to the reference. This consumes query bases but
# we don't output them because the reference cannot be
# changed. I.e., these bases in the query would need to be
# inserted into the reference. Remove these bases from the
# query but record what would have been inserted into the
# reference.
atStart = False
self.referenceInsertions[queryId].append(
(referenceIndex,
query[queryIndex:queryIndex + length]))
elif operation == CDEL:
# Delete from the reference. Some bases from the reference
# would need to be deleted to continue the match. So we put
# an insertion into the query to compensate.
atStart = False
alignedSequence += queryInsertionChar * length
alignedQuality += unknownQualityChar * length
elif operation == CREF_SKIP:
# Skipped reference. Opens a gap in the query. For
# mRNA-to-genome alignment, an N operation represents an
# intron. For other types of alignments, the
# interpretation of N is not defined. So this is unlikely
# to occur.
atStart = False
alignedSequence += queryInsertionChar * length
alignedQuality += unknownQualityChar * length
elif operation == CSOFT_CLIP:
# Bases in the query that are not part of the match. We
# remove these from the query if they protrude before the
# start or after the end of the reference. According to the
# SAM docs, 'S' operations may only have 'H' operations
# between them and the ends of the CIGAR string.
if atStart:
# Don't set atStart=False, in case there's another 'S'
# operation.
unwantedLeft = length - referenceStart
if unwantedLeft > 0:
# The query protrudes left. Copy its right part.
alignedSequence += query[
queryIndex + unwantedLeft:queryIndex + length]
alignedQuality += quality[
queryIndex + unwantedLeft:queryIndex + length]
referenceStart = 0
else:
referenceStart -= length
alignedSequence += query[
queryIndex:queryIndex + length]
alignedQuality += quality[
queryIndex:queryIndex + length]
else:
unwantedRight = (
(referenceStart + len(alignedSequence) + length) -
referenceLength)
if unwantedRight > 0:
# The query protrudes right. Copy its left part.
alignedSequence += query[
queryIndex:queryIndex + length - unwantedRight]
alignedQuality += quality[
queryIndex:queryIndex + length - unwantedRight]
else:
alignedSequence += query[
queryIndex:queryIndex + length]
alignedQuality += quality[
queryIndex:queryIndex + length]
elif operation == CHARD_CLIP:
# Some bases have been completely removed from the query.
# This (H) can only be present as the first and/or last
# operation. There is nothing to do as the bases are simply
# not present in the query string in the SAM/BAM file.
pass
elif operation == CPAD:
# This is "silent deletion from the padded reference",
# which consumes neither query nor reference.
atStart = False
else:
raise ValueError('Unknown CIGAR operation:', operation)
if operation in _CONSUMES_QUERY:
queryIndex += length
if operation in _CONSUMES_REFERENCE:
referenceIndex += length
if queryIndex != len(query):
# Oops, we did not consume the entire query.
raise ValueError(
'Query %r not fully consumed when parsing CIGAR string. '
'Query %r (len %d), final query index %d, CIGAR: %r' %
(alignment.query_name, query, len(query), queryIndex,
alignment.cigartuples))
# We cannot test we consumed the entire reference. The CIGAR
# string applies to (and exhausts) the query but is silent
# about the part of the reference that lies to the right of the
# aligned query.
# Put gap characters before and after the aligned sequence so that
# it is offset properly and matches the length of the reference.
padRightLength = (referenceLength -
(referenceStart + len(alignedSequence)))
paddedSequence = (padChar * referenceStart +
alignedSequence +
padChar * padRightLength)
paddedQuality = (unknownQualityChar * referenceStart +
alignedQuality +
unknownQualityChar * padRightLength)
read = Read(queryId, paddedSequence, paddedQuality)
if addAlignment:
read.alignment = alignment
yield read | Produce padded (with gaps) queries according to the CIGAR string and
reference sequence length for each matching query sequence.
@param rcSuffix: A C{str} to add to the end of query names that are
reverse complemented. This is added before the /1, /2, etc., that
are added for duplicated ids (if there are duplicates and
C{allowDuplicateIds} is C{False}.
@param rcNeeded: If C{True}, queries that are flagged as matching when
reverse complemented should have reverse complementing when
preparing the output sequences. This must be used if the program
that created the SAM/BAM input flags reversed matches but does not
also store the reverse complemented query.
@param padChar: A C{str} of length one to use to pad queries with to
make them the same length as the reference sequence.
@param queryInsertionChar: A C{str} of length one to use to insert
into queries when the CIGAR string indicates that the alignment
of a query would cause a deletion in the reference. This character
is inserted as a 'missing' query character (i.e., a base that can
be assumed to have been lost due to an error) whose existence is
necessary for the match to continue.
@param unknownQualityChar: The character to put into the quality
string when unknown bases are inserted in the query or the query
is padded on the left/right with gaps.
@param allowDuplicateIds: If C{True}, repeated query ids (due to
secondary or supplemental matches) will not have /1, /2, etc.
appended to their ids. So repeated ids may appear in the yielded
FASTA.
@param addAlignment: If C{True} the reads yielded by the returned
generator will also have an C{alignment} attribute, being the
C{pysam.AlignedSegment} for the query.
@raises InvalidSAM: If a query has an empty SEQ field and either there
is no previous alignment or the alignment is not marked as
secondary or supplementary.
@return: A generator that yields C{Read} instances that are padded
with gap characters to align them to the length of the reference
sequence. See C{addAlignment}, above, to yield reads with the
corresponding C{pysam.AlignedSegment}. | Below is the the instruction that describes the task:
### Input:
Produce padded (with gaps) queries according to the CIGAR string and
reference sequence length for each matching query sequence.
@param rcSuffix: A C{str} to add to the end of query names that are
reverse complemented. This is added before the /1, /2, etc., that
are added for duplicated ids (if there are duplicates and
C{allowDuplicateIds} is C{False}.
@param rcNeeded: If C{True}, queries that are flagged as matching when
reverse complemented should have reverse complementing when
preparing the output sequences. This must be used if the program
that created the SAM/BAM input flags reversed matches but does not
also store the reverse complemented query.
@param padChar: A C{str} of length one to use to pad queries with to
make them the same length as the reference sequence.
@param queryInsertionChar: A C{str} of length one to use to insert
into queries when the CIGAR string indicates that the alignment
of a query would cause a deletion in the reference. This character
is inserted as a 'missing' query character (i.e., a base that can
be assumed to have been lost due to an error) whose existence is
necessary for the match to continue.
@param unknownQualityChar: The character to put into the quality
string when unknown bases are inserted in the query or the query
is padded on the left/right with gaps.
@param allowDuplicateIds: If C{True}, repeated query ids (due to
secondary or supplemental matches) will not have /1, /2, etc.
appended to their ids. So repeated ids may appear in the yielded
FASTA.
@param addAlignment: If C{True} the reads yielded by the returned
generator will also have an C{alignment} attribute, being the
C{pysam.AlignedSegment} for the query.
@raises InvalidSAM: If a query has an empty SEQ field and either there
is no previous alignment or the alignment is not marked as
secondary or supplementary.
@return: A generator that yields C{Read} instances that are padded
with gap characters to align them to the length of the reference
sequence. See C{addAlignment}, above, to yield reads with the
corresponding C{pysam.AlignedSegment}.
### Response:
def queries(self, rcSuffix='', rcNeeded=False, padChar='-',
queryInsertionChar='N', unknownQualityChar='!',
allowDuplicateIds=False, addAlignment=False):
"""
Produce padded (with gaps) queries according to the CIGAR string and
reference sequence length for each matching query sequence.
@param rcSuffix: A C{str} to add to the end of query names that are
reverse complemented. This is added before the /1, /2, etc., that
are added for duplicated ids (if there are duplicates and
C{allowDuplicateIds} is C{False}.
@param rcNeeded: If C{True}, queries that are flagged as matching when
reverse complemented should have reverse complementing when
preparing the output sequences. This must be used if the program
that created the SAM/BAM input flags reversed matches but does not
also store the reverse complemented query.
@param padChar: A C{str} of length one to use to pad queries with to
make them the same length as the reference sequence.
@param queryInsertionChar: A C{str} of length one to use to insert
into queries when the CIGAR string indicates that the alignment
of a query would cause a deletion in the reference. This character
is inserted as a 'missing' query character (i.e., a base that can
be assumed to have been lost due to an error) whose existence is
necessary for the match to continue.
@param unknownQualityChar: The character to put into the quality
string when unknown bases are inserted in the query or the query
is padded on the left/right with gaps.
@param allowDuplicateIds: If C{True}, repeated query ids (due to
secondary or supplemental matches) will not have /1, /2, etc.
appended to their ids. So repeated ids may appear in the yielded
FASTA.
@param addAlignment: If C{True} the reads yielded by the returned
generator will also have an C{alignment} attribute, being the
C{pysam.AlignedSegment} for the query.
@raises InvalidSAM: If a query has an empty SEQ field and either there
is no previous alignment or the alignment is not marked as
secondary or supplementary.
@return: A generator that yields C{Read} instances that are padded
with gap characters to align them to the length of the reference
sequence. See C{addAlignment}, above, to yield reads with the
corresponding C{pysam.AlignedSegment}.
"""
referenceLength = self.referenceLength
# Hold the count for each id so we can add /1, /2 etc to duplicate
# ids (unless --allowDuplicateIds was given).
idCount = Counter()
MATCH_OPERATIONS = {CMATCH, CEQUAL, CDIFF}
for lineNumber, alignment in enumerate(
self.samFilter.alignments(), start=1):
query = alignment.query_sequence
quality = ''.join(chr(q + 33) for q in alignment.query_qualities)
if alignment.is_reverse:
if rcNeeded:
query = DNARead('id', query).reverseComplement().sequence
quality = quality[::-1]
if rcSuffix:
alignment.query_name += rcSuffix
# Adjust the query id if it's a duplicate and we're not allowing
# duplicates.
if allowDuplicateIds:
queryId = alignment.query_name
else:
count = idCount[alignment.query_name]
idCount[alignment.query_name] += 1
queryId = alignment.query_name + (
'' if count == 0 else '/%d' % count)
referenceStart = alignment.reference_start
atStart = True
queryIndex = 0
referenceIndex = referenceStart
alignedSequence = ''
alignedQuality = ''
for operation, length in alignment.cigartuples:
# The operations are tested in the order they appear in
# https://samtools.github.io/hts-specs/SAMv1.pdf It would be
# more efficient to test them in order of frequency of
# occurrence.
if operation in MATCH_OPERATIONS:
atStart = False
alignedSequence += query[queryIndex:queryIndex + length]
alignedQuality += quality[queryIndex:queryIndex + length]
elif operation == CINS:
# Insertion to the reference. This consumes query bases but
# we don't output them because the reference cannot be
# changed. I.e., these bases in the query would need to be
# inserted into the reference. Remove these bases from the
# query but record what would have been inserted into the
# reference.
atStart = False
self.referenceInsertions[queryId].append(
(referenceIndex,
query[queryIndex:queryIndex + length]))
elif operation == CDEL:
# Delete from the reference. Some bases from the reference
# would need to be deleted to continue the match. So we put
# an insertion into the query to compensate.
atStart = False
alignedSequence += queryInsertionChar * length
alignedQuality += unknownQualityChar * length
elif operation == CREF_SKIP:
# Skipped reference. Opens a gap in the query. For
# mRNA-to-genome alignment, an N operation represents an
# intron. For other types of alignments, the
# interpretation of N is not defined. So this is unlikely
# to occur.
atStart = False
alignedSequence += queryInsertionChar * length
alignedQuality += unknownQualityChar * length
elif operation == CSOFT_CLIP:
# Bases in the query that are not part of the match. We
# remove these from the query if they protrude before the
# start or after the end of the reference. According to the
# SAM docs, 'S' operations may only have 'H' operations
# between them and the ends of the CIGAR string.
if atStart:
# Don't set atStart=False, in case there's another 'S'
# operation.
unwantedLeft = length - referenceStart
if unwantedLeft > 0:
# The query protrudes left. Copy its right part.
alignedSequence += query[
queryIndex + unwantedLeft:queryIndex + length]
alignedQuality += quality[
queryIndex + unwantedLeft:queryIndex + length]
referenceStart = 0
else:
referenceStart -= length
alignedSequence += query[
queryIndex:queryIndex + length]
alignedQuality += quality[
queryIndex:queryIndex + length]
else:
unwantedRight = (
(referenceStart + len(alignedSequence) + length) -
referenceLength)
if unwantedRight > 0:
# The query protrudes right. Copy its left part.
alignedSequence += query[
queryIndex:queryIndex + length - unwantedRight]
alignedQuality += quality[
queryIndex:queryIndex + length - unwantedRight]
else:
alignedSequence += query[
queryIndex:queryIndex + length]
alignedQuality += quality[
queryIndex:queryIndex + length]
elif operation == CHARD_CLIP:
# Some bases have been completely removed from the query.
# This (H) can only be present as the first and/or last
# operation. There is nothing to do as the bases are simply
# not present in the query string in the SAM/BAM file.
pass
elif operation == CPAD:
# This is "silent deletion from the padded reference",
# which consumes neither query nor reference.
atStart = False
else:
raise ValueError('Unknown CIGAR operation:', operation)
if operation in _CONSUMES_QUERY:
queryIndex += length
if operation in _CONSUMES_REFERENCE:
referenceIndex += length
if queryIndex != len(query):
# Oops, we did not consume the entire query.
raise ValueError(
'Query %r not fully consumed when parsing CIGAR string. '
'Query %r (len %d), final query index %d, CIGAR: %r' %
(alignment.query_name, query, len(query), queryIndex,
alignment.cigartuples))
# We cannot test we consumed the entire reference. The CIGAR
# string applies to (and exhausts) the query but is silent
# about the part of the reference that lies to the right of the
# aligned query.
# Put gap characters before and after the aligned sequence so that
# it is offset properly and matches the length of the reference.
padRightLength = (referenceLength -
(referenceStart + len(alignedSequence)))
paddedSequence = (padChar * referenceStart +
alignedSequence +
padChar * padRightLength)
paddedQuality = (unknownQualityChar * referenceStart +
alignedQuality +
unknownQualityChar * padRightLength)
read = Read(queryId, paddedSequence, paddedQuality)
if addAlignment:
read.alignment = alignment
yield read |
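Note: the _CONSUMES_QUERY and _CONSUMES_REFERENCE sets used in the code above are defined elsewhere in the source and are not part of this row. A minimal, self-contained sketch of that bookkeeping, written from the CIGAR table in the SAM specification (the constant names and helper below are illustrative, not taken from the library):

# Numeric CIGAR operation codes per the SAM spec: M=0, I=1, D=2, N=3, S=4, H=5, P=6, '='=7, X=8.
CONSUMES_QUERY = {0, 1, 4, 7, 8}       # M, I, S, '=', X advance the query index
CONSUMES_REFERENCE = {0, 2, 3, 7, 8}   # M, D, N, '=', X advance the reference index

def aligned_reference_span(cigartuples):
    # e.g. [(0, 5), (1, 2), (2, 3)] is 5M2I3D and covers 5 + 3 = 8 reference bases.
    return sum(length for op, length in cigartuples if op in CONSUMES_REFERENCE)

assert aligned_reference_span([(0, 5), (1, 2), (2, 3)]) == 8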
def build_time(start_time):
"""
Calculate build time per package
"""
diff_time = round(time.time() - start_time, 2)
if diff_time <= 59.99:
sum_time = str(diff_time) + " Sec"
elif diff_time > 59.99 and diff_time <= 3599.99:
sum_time = round(diff_time / 60, 2)
sum_time_list = re.findall(r"\d+", str(sum_time))
sum_time = ("{0} Min {1} Sec".format(sum_time_list[0],
sum_time_list[1]))
elif diff_time > 3599.99:
sum_time = round(diff_time / 3600, 2)
sum_time_list = re.findall(r"\d+", str(sum_time))
sum_time = ("{0} Hours {1} Min".format(sum_time_list[0],
sum_time_list[1]))
return sum_time | Calculate build time per package | Below is the instruction that describes the task:
### Input:
Calculate build time per package
### Response:
def build_time(start_time):
"""
Calculate build time per package
"""
diff_time = round(time.time() - start_time, 2)
if diff_time <= 59.99:
sum_time = str(diff_time) + " Sec"
elif diff_time > 59.99 and diff_time <= 3599.99:
sum_time = round(diff_time / 60, 2)
sum_time_list = re.findall(r"\d+", str(sum_time))
sum_time = ("{0} Min {1} Sec".format(sum_time_list[0],
sum_time_list[1]))
elif diff_time > 3599.99:
sum_time = round(diff_time / 3600, 2)
sum_time_list = re.findall(r"\d+", str(sum_time))
sum_time = ("{0} Hours {1} Min".format(sum_time_list[0],
sum_time_list[1]))
return sum_time |
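A hypothetical call, assuming build_time (and its time/re imports) are in scope; the timing and output are illustrative only:

import time

start = time.time()
# ... package build happens here ...
print(build_time(start))  # e.g. "35.62 Sec" for a sub-minute build

Note that for longer builds the digits after the decimal point of the rounded minutes (or hours) value become the second field, so a 150-second build is reported as "2 Min 5 Sec" rather than "2 Min 30 Sec".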
def any(pred: Callable, xs: Iterable):
"""
Check if at least one element of the iterable `xs`
fulfills predicate `pred`.
:param pred:
predicate function.
:param xs:
iterable object.
:returns: boolean
"""
b = find_first(pred, xs)
return True if b is not None else False | Check if at least one element of the iterable `xs`
fulfills predicate `pred`.
:param pred:
predicate function.
:param xs:
iterable object.
:returns: boolean | Below is the instruction that describes the task:
### Input:
Check if at least one element of the iterable `xs`
fulfills predicate `pred`.
:param pred:
predicate function.
:param xs:
iterable object.
:returns: boolean
### Response:
def any(pred: Callable, xs: Iterable):
"""
Check if at least one element of the iterable `xs`
fulfills predicate `pred`.
:param pred:
predicate function.
:param xs:
iterable object.
:returns: boolean
"""
b = find_first(pred, xs)
return True if b is not None else False |
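A small usage sketch, assuming the any defined above is in scope and that find_first(pred, xs), which is not shown in this row, returns the first element satisfying pred or None when there is none:

assert any(lambda x: x > 3, [1, 2, 5, 0]) is True   # 5 is the first match
assert any(lambda x: x < 0, [1, 2, 5, 0]) is False  # no match, so find_first returns None

One consequence of testing `is not None` rather than a boolean flag: a matching element that happens to be None itself would be reported as no match.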
def Solver_CMFR_N(t_data, C_data, theta_guess, C_bar_guess):
"""Use non-linear least squares to fit the function
Tracer_CMFR_N(t_seconds, t_bar, C_bar, N) to reactor data.
:param t_data: Array of times with units
:type t_data: float list
:param C_data: Array of tracer concentration data with units
:type C_data: float list
:param theta_guess: Estimate of time spent in one CMFR with units.
:type theta_guess: float
:param C_bar_guess: Estimate of average concentration with units ((mass of tracer)/(volume of one CMFR))
:type C_bar_guess: float
:return: tuple of
* **theta** (*float*)- Residence time in seconds
* **C_bar** (*float*) - Average concentration with same units as C_bar_guess
* **N** (*float*)- Number of CMFRs in series that best fit the data
"""
C_unitless = C_data.magnitude
C_units = str(C_bar_guess.units)
t_seconds = (t_data.to(u.s)).magnitude
# assume that a guess of 1 reactor in series is close enough to get a solution
p0 = [theta_guess.to(u.s).magnitude, C_bar_guess.magnitude,1]
popt, pcov = curve_fit(Tracer_CMFR_N, t_seconds, C_unitless, p0)
Solver_theta = popt[0]*u.s
Solver_C_bar = popt[1]*u(C_units)
Solver_N = popt[2]
Reactor_results = collections.namedtuple('Reactor_results','theta C_bar N')
CMFR = Reactor_results(theta=Solver_theta, C_bar=Solver_C_bar, N=Solver_N)
return CMFR | Use non-linear least squares to fit the function
Tracer_CMFR_N(t_seconds, t_bar, C_bar, N) to reactor data.
:param t_data: Array of times with units
:type t_data: float list
:param C_data: Array of tracer concentration data with units
:type C_data: float list
:param theta_guess: Estimate of time spent in one CMFR with units.
:type theta_guess: float
:param C_bar_guess: Estimate of average concentration with units ((mass of tracer)/(volume of one CMFR))
:type C_bar_guess: float
:return: tuple of
* **theta** (*float*)- Residence time in seconds
* **C_bar** (*float*) - Average concentration with same units as C_bar_guess
* **N** (*float*)- Number of CMFRs in series that best fit the data | Below is the instruction that describes the task:
### Input:
Use non-linear least squares to fit the function
Tracer_CMFR_N(t_seconds, t_bar, C_bar, N) to reactor data.
:param t_data: Array of times with units
:type t_data: float list
:param C_data: Array of tracer concentration data with units
:type C_data: float list
:param theta_guess: Estimate of time spent in one CMFR with units.
:type theta_guess: float
:param C_bar_guess: Estimate of average concentration with units ((mass of tracer)/(volume of one CMFR))
:type C_bar_guess: float
:return: tuple of
* **theta** (*float*)- Residence time in seconds
* **C_bar** (*float*) - Average concentration with same units as C_bar_guess
* **N** (*float*)- Number of CMFRs in series that best fit the data
### Response:
def Solver_CMFR_N(t_data, C_data, theta_guess, C_bar_guess):
"""Use non-linear least squares to fit the function
Tracer_CMFR_N(t_seconds, t_bar, C_bar, N) to reactor data.
:param t_data: Array of times with units
:type t_data: float list
:param C_data: Array of tracer concentration data with units
:type C_data: float list
:param theta_guess: Estimate of time spent in one CMFR with units.
:type theta_guess: float
:param C_bar_guess: Estimate of average concentration with units ((mass of tracer)/(volume of one CMFR))
:type C_bar_guess: float
:return: tuple of
* **theta** (*float*)- Residence time in seconds
* **C_bar** (*float*) - Average concentration with same units as C_bar_guess
* **N** (*float*)- Number of CMFRs in series that best fit the data
"""
C_unitless = C_data.magnitude
C_units = str(C_bar_guess.units)
t_seconds = (t_data.to(u.s)).magnitude
# assume that a guess of 1 reactor in series is close enough to get a solution
p0 = [theta_guess.to(u.s).magnitude, C_bar_guess.magnitude,1]
popt, pcov = curve_fit(Tracer_CMFR_N, t_seconds, C_unitless, p0)
Solver_theta = popt[0]*u.s
Solver_C_bar = popt[1]*u(C_units)
Solver_N = popt[2]
Reactor_results = collections.namedtuple('Reactor_results','theta C_bar N')
CMFR = Reactor_results(theta=Solver_theta, C_bar=Solver_C_bar, N=Solver_N)
return CMFR |
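For context, Tracer_CMFR_N (not shown in this row) is the tanks-in-series residence time distribution being fitted. A minimal sketch of that model, written from the standard RTD formula; the library's own implementation may differ in unit and argument handling:

import numpy as np
from scipy import special

def tracer_cmfr_n_sketch(t_seconds, t_bar, C_bar, N):
    # C(t) = C_bar * N**N / Gamma(N) * (t/t_bar)**(N-1) * exp(-N*t/t_bar)
    x = np.asarray(t_seconds) / t_bar
    return C_bar * N ** N / special.gamma(N) * x ** (N - 1) * np.exp(-N * x)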
def plot_high_levels_data(self):
"""
Complicated function that draws the high level mean plot on canvas4,
draws all specimen, sample, or site interpretations according to the
UPPER_LEVEL_SHOW variable, draws the fisher mean or fisher mean by
polarity of all interpretations displayed, draws sample orientation
check if on, and if interpretation editor is open it calls the
interpretation editor to have it draw the same things.
"""
# self.toolbar4.home()
high_level = self.level_box.GetValue()
self.UPPER_LEVEL_NAME = self.level_names.GetValue()
self.UPPER_LEVEL_MEAN = self.mean_type_box.GetValue()
draw_net(self.high_level_eqarea)
what_is_it = self.level_box.GetValue()+": "+self.level_names.GetValue()
self.high_level_eqarea.text(-1.2, 1.15, what_is_it, {
'family': self.font_type, 'fontsize': 10*self.GUI_RESOLUTION, 'style': 'normal', 'va': 'center', 'ha': 'left'})
if self.ie_open:
self.ie.draw_net()
self.ie.write(what_is_it)
# plot elements directions
self.plot_high_level_elements()
# plot elements means
self.plot_high_level_means()
# update high level stats after plotting in case of change
self.update_high_level_stats()
# check sample orientation
if self.check_orient_on:
self.calc_and_plot_sample_orient_check()
self.canvas4.draw()
if self.ie_open:
self.ie.draw() | Complicated function that draws the high level mean plot on canvas4,
draws all specimen, sample, or site interpretations according to the
UPPER_LEVEL_SHOW variable, draws the fisher mean or fisher mean by
polarity of all interpretations displayed, draws sample orientation
check if on, and if interpretation editor is open it calls the
interpretation editor to have it draw the same things. | Below is the instruction that describes the task:
### Input:
Complicated function that draws the high level mean plot on canvas4,
draws all specimen, sample, or site interpretations according to the
UPPER_LEVEL_SHOW variable, draws the fisher mean or fisher mean by
polarity of all interpretations displayed, draws sample orientation
check if on, and if interpretation editor is open it calls the
interpretation editor to have it draw the same things.
### Response:
def plot_high_levels_data(self):
"""
Complicated function that draws the high level mean plot on canvas4,
draws all specimen, sample, or site interpretations according to the
UPPER_LEVEL_SHOW variable, draws the fisher mean or fisher mean by
polarity of all interpretations displayed, draws sample orientation
check if on, and if interpretation editor is open it calls the
interpretation editor to have it draw the same things.
"""
# self.toolbar4.home()
high_level = self.level_box.GetValue()
self.UPPER_LEVEL_NAME = self.level_names.GetValue()
self.UPPER_LEVEL_MEAN = self.mean_type_box.GetValue()
draw_net(self.high_level_eqarea)
what_is_it = self.level_box.GetValue()+": "+self.level_names.GetValue()
self.high_level_eqarea.text(-1.2, 1.15, what_is_it, {
'family': self.font_type, 'fontsize': 10*self.GUI_RESOLUTION, 'style': 'normal', 'va': 'center', 'ha': 'left'})
if self.ie_open:
self.ie.draw_net()
self.ie.write(what_is_it)
# plot elements directions
self.plot_high_level_elements()
# plot elements means
self.plot_high_level_means()
# update high level stats after plotting in case of change
self.update_high_level_stats()
# check sample orientation
if self.check_orient_on:
self.calc_and_plot_sample_orient_check()
self.canvas4.draw()
if self.ie_open:
self.ie.draw() |
def _orientation_ok_to_bridge_contigs(self, start_hit, end_hit):
'''Returns True iff the orientation of the hits means that the query contig of both hits can bridge the reference contigs of the hits'''
assert start_hit.qry_name == end_hit.qry_name
if start_hit.ref_name == end_hit.ref_name:
return False
if (
(self._is_at_ref_end(start_hit) and start_hit.on_same_strand())
or (self._is_at_ref_start(start_hit) and not start_hit.on_same_strand())
):
start_hit_ok = True
else:
start_hit_ok = False
if (
(self._is_at_ref_start(end_hit) and end_hit.on_same_strand())
or (self._is_at_ref_end(end_hit) and not end_hit.on_same_strand())
):
end_hit_ok = True
else:
end_hit_ok = False
return start_hit_ok and end_hit_ok | Returns True iff the orientation of the hits means that the query contig of both hits can bridge the reference contigs of the hits | Below is the instruction that describes the task:
### Input:
Returns True iff the orientation of the hits means that the query contig of both hits can bridge the reference contigs of the hits
### Response:
def _orientation_ok_to_bridge_contigs(self, start_hit, end_hit):
'''Returns True iff the orientation of the hits means that the query contig of both hits can bridge the reference contigs of the hits'''
assert start_hit.qry_name == end_hit.qry_name
if start_hit.ref_name == end_hit.ref_name:
return False
if (
(self._is_at_ref_end(start_hit) and start_hit.on_same_strand())
or (self._is_at_ref_start(start_hit) and not start_hit.on_same_strand())
):
start_hit_ok = True
else:
start_hit_ok = False
if (
(self._is_at_ref_start(end_hit) and end_hit.on_same_strand())
or (self._is_at_ref_end(end_hit) and not end_hit.on_same_strand())
):
end_hit_ok = True
else:
end_hit_ok = False
return start_hit_ok and end_hit_ok |
def find_previous_sibling(self, *args, **kwargs):
"""
Like :meth:`find`, but searches through :attr:`previous_siblings`
"""
op = operator.methodcaller('find_previous_sibling', *args, **kwargs)
return self._wrap_node(op) | Like :meth:`find`, but searches through :attr:`previous_siblings` | Below is the instruction that describes the task:
### Input:
Like :meth:`find`, but searches through :attr:`previous_siblings`
### Response:
def find_previous_sibling(self, *args, **kwargs):
"""
Like :meth:`find`, but searches through :attr:`previous_siblings`
"""
op = operator.methodcaller('find_previous_sibling', *args, **kwargs)
return self._wrap_node(op) |
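For reference, operator.methodcaller simply packages the method name and its arguments into a callable; a standalone illustration (the tag and attribute values are made up):

import operator

op = operator.methodcaller('find_previous_sibling', 'p', class_='intro')
# op(node) is now equivalent to node.find_previous_sibling('p', class_='intro')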
def table_mask(self):
"""ndarray, True where table margin <= min_base_size, same shape as slice."""
margin = compress_pruned(
self._slice.margin(
axis=None,
weighted=False,
include_transforms_for_dims=self._hs_dims,
prune=self._prune,
)
)
mask = margin < self._size
if margin.shape == self._shape:
return mask
if self._slice.dim_types[0] == DT.MR:
# If the margin is a column vector - broadcast its mask to the array shape
return np.logical_or(np.zeros(self._shape, dtype=bool), mask[:, None])
return np.logical_or(np.zeros(self._shape, dtype=bool), mask) | ndarray, True where table margin <= min_base_size, same shape as slice. | Below is the instruction that describes the task:
### Input:
ndarray, True where table margin <= min_base_size, same shape as slice.
### Response:
def table_mask(self):
"""ndarray, True where table margin <= min_base_size, same shape as slice."""
margin = compress_pruned(
self._slice.margin(
axis=None,
weighted=False,
include_transforms_for_dims=self._hs_dims,
prune=self._prune,
)
)
mask = margin < self._size
if margin.shape == self._shape:
return mask
if self._slice.dim_types[0] == DT.MR:
# If the margin is a column vector - broadcast its mask to the array shape
return np.logical_or(np.zeros(self._shape, dtype=bool), mask[:, None])
return np.logical_or(np.zeros(self._shape, dtype=bool), mask) |
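A toy illustration of the broadcasting step for the MR case, with made-up numbers:

import numpy as np

margin = np.array([10, 40, 5])   # per-row margin
mask = margin < 30               # array([ True, False,  True])
table_mask = np.logical_or(np.zeros((3, 4), dtype=bool), mask[:, None])
# Rows 0 and 2 are now masked across all four columns.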
def _opposite_axis_margin(self):
"""ndarray representing margin along the axis opposite of self._axis
In the process of calculating p-values for the column significance testing we
need both the margin along the primary axis and the percentage margin along
the opposite axis.
"""
off_axis = 1 - self._axis
return self._slice.margin(axis=off_axis, include_mr_cat=self._include_mr_cat) | ndarray representing margin along the axis opposite of self._axis
In the process of calculating p-values for the column significance testing we
need both the margin along the primary axis and the percentage margin along
the opposite axis. | Below is the instruction that describes the task:
### Input:
ndarray representing margin along the axis opposite of self._axis
In the process of calculating p-values for the column significance testing we
need both the margin along the primary axis and the percentage margin along
the opposite axis.
### Response:
def _opposite_axis_margin(self):
"""ndarray representing margin along the axis opposite of self._axis
In the process of calculating p-values for the column significance testing we
need both the margin along the primary axis and the percentage margin along
the opposite axis.
"""
off_axis = 1 - self._axis
return self._slice.margin(axis=off_axis, include_mr_cat=self._include_mr_cat) |
def parse_nni_function(code):
"""Parse `nni.function_choice` expression.
Return the AST node of annotated expression and a list of dumped function call expressions.
code: annotation string
"""
name, call = parse_annotation_function(code, 'function_choice')
funcs = [ast.dump(func, False) for func in call.args]
convert_args_to_dict(call, with_lambda=True)
name_str = astor.to_source(name).strip()
call.keywords[0].value = ast.Str(s=name_str)
return call, funcs | Parse `nni.function_choice` expression.
Return the AST node of annotated expression and a list of dumped function call expressions.
code: annotation string | Below is the instruction that describes the task:
### Input:
Parse `nni.function_choice` expression.
Return the AST node of annotated expression and a list of dumped function call expressions.
code: annotation string
### Response:
def parse_nni_function(code):
"""Parse `nni.function_choice` expression.
Return the AST node of annotated expression and a list of dumped function call expressions.
code: annotation string
"""
name, call = parse_annotation_function(code, 'function_choice')
funcs = [ast.dump(func, False) for func in call.args]
convert_args_to_dict(call, with_lambda=True)
name_str = astor.to_source(name).strip()
call.keywords[0].value = ast.Str(s=name_str)
return call, funcs |
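Independently of the NNI helpers used above (parse_annotation_function and convert_args_to_dict are not shown in this row), the underlying ast mechanics look like this; the annotation text is illustrative only:

import ast

tree = ast.parse("nni.function_choice(conv, pool, 'layer0')", mode="eval")
call = tree.body  # the ast.Call node
print([ast.dump(arg) for arg in call.args])
# e.g. ["Name(id='conv', ctx=Load())", "Name(id='pool', ctx=Load())", "Constant(value='layer0')"]
# (the exact repr of the last argument varies by Python version)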
def set_volume(self, pct, channel=None):
"""
Sets the sound volume to the given percentage [0-100] by calling
``amixer -q set <channel> <pct>%``.
If the channel is not specified, it tries to determine the default one
by running ``amixer scontrols``. If that fails as well, it uses the
``Playback`` channel, as that is the only channel on the EV3.
"""
if channel is None:
channel = self._get_channel()
cmd_line = '/usr/bin/amixer -q set {0} {1:d}%'.format(channel, pct)
Popen(shlex.split(cmd_line)).wait() | Sets the sound volume to the given percentage [0-100] by calling
``amixer -q set <channel> <pct>%``.
If the channel is not specified, it tries to determine the default one
by running ``amixer scontrols``. If that fails as well, it uses the
``Playback`` channel, as that is the only channel on the EV3. | Below is the instruction that describes the task:
### Input:
Sets the sound volume to the given percentage [0-100] by calling
``amixer -q set <channel> <pct>%``.
If the channel is not specified, it tries to determine the default one
by running ``amixer scontrols``. If that fails as well, it uses the
``Playback`` channel, as that is the only channel on the EV3.
### Response:
def set_volume(self, pct, channel=None):
"""
Sets the sound volume to the given percentage [0-100] by calling
``amixer -q set <channel> <pct>%``.
If the channel is not specified, it tries to determine the default one
by running ``amixer scontrols``. If that fails as well, it uses the
``Playback`` channel, as that is the only channel on the EV3.
"""
if channel is None:
channel = self._get_channel()
cmd_line = '/usr/bin/amixer -q set {0} {1:d}%'.format(channel, pct)
Popen(shlex.split(cmd_line)).wait() |
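What the method ends up executing, for example for set_volume(75, channel='Master'); the channel name is illustrative:

import shlex

cmd_line = '/usr/bin/amixer -q set {0} {1:d}%'.format('Master', 75)
print(shlex.split(cmd_line))
# ['/usr/bin/amixer', '-q', 'set', 'Master', '75%']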
def connect(self):
'''
Registers a new device + username with the bridge
'''
# Don't try to register if we already have
if self.validate_registration():
return True
body = {
'devicetype': self.device_type,
'username': self.username,
}
response = self.make_request('POST', '/api', body)
if 'error' in response:
if response['error']['type'] == 101:
msg = 'Please press the link button and try again'
else:
msg = response['error']['description']
raise Exception(msg) | Registers a new device + username with the bridge | Below is the instruction that describes the task:
### Input:
Registers a new device + username with the bridge
### Response:
def connect(self):
'''
Registers a new device + username with the bridge
'''
# Don't try to register if we already have
if self.validate_registration():
return True
body = {
'devicetype': self.device_type,
'username': self.username,
}
response = self.make_request('POST', '/api', body)
if 'error' in response:
if response['error']['type'] == 101:
msg = 'Please press the link button and try again'
else:
msg = response['error']['description']
raise Exception(msg) |
def sparsify_rows(x, quantile=0.01):
'''
Return a row-sparse matrix approximating the input `x`.
Parameters
----------
x : np.ndarray [ndim <= 2]
The input matrix to sparsify.
quantile : float in [0, 1.0)
Percentage of magnitude to discard in each row of `x`
Returns
-------
x_sparse : `scipy.sparse.csr_matrix` [shape=x.shape]
Row-sparsified approximation of `x`
If `x.ndim == 1`, then `x` is interpreted as a row vector,
and `x_sparse.shape == (1, len(x))`.
Raises
------
ParameterError
If `x.ndim > 2`
If `quantile` lies outside `[0, 1.0)`
Notes
-----
This function caches at level 40.
Examples
--------
>>> # Construct a Hann window to sparsify
>>> x = scipy.signal.hann(32)
>>> x
array([ 0. , 0.01 , 0.041, 0.09 , 0.156, 0.236, 0.326,
0.424, 0.525, 0.625, 0.72 , 0.806, 0.879, 0.937,
0.977, 0.997, 0.997, 0.977, 0.937, 0.879, 0.806,
0.72 , 0.625, 0.525, 0.424, 0.326, 0.236, 0.156,
0.09 , 0.041, 0.01 , 0. ])
>>> # Discard the bottom percentile
>>> x_sparse = librosa.util.sparsify_rows(x, quantile=0.01)
>>> x_sparse
<1x32 sparse matrix of type '<type 'numpy.float64'>'
with 26 stored elements in Compressed Sparse Row format>
>>> x_sparse.todense()
matrix([[ 0. , 0. , 0. , 0.09 , 0.156, 0.236, 0.326,
0.424, 0.525, 0.625, 0.72 , 0.806, 0.879, 0.937,
0.977, 0.997, 0.997, 0.977, 0.937, 0.879, 0.806,
0.72 , 0.625, 0.525, 0.424, 0.326, 0.236, 0.156,
0.09 , 0. , 0. , 0. ]])
>>> # Discard up to the bottom 10th percentile
>>> x_sparse = librosa.util.sparsify_rows(x, quantile=0.1)
>>> x_sparse
<1x32 sparse matrix of type '<type 'numpy.float64'>'
with 20 stored elements in Compressed Sparse Row format>
>>> x_sparse.todense()
matrix([[ 0. , 0. , 0. , 0. , 0. , 0. , 0.326,
0.424, 0.525, 0.625, 0.72 , 0.806, 0.879, 0.937,
0.977, 0.997, 0.997, 0.977, 0.937, 0.879, 0.806,
0.72 , 0.625, 0.525, 0.424, 0.326, 0. , 0. ,
0. , 0. , 0. , 0. ]])
'''
if x.ndim == 1:
x = x.reshape((1, -1))
elif x.ndim > 2:
raise ParameterError('Input must have 2 or fewer dimensions. '
'Provided x.shape={}.'.format(x.shape))
if not 0.0 <= quantile < 1:
raise ParameterError('Invalid quantile {:.2f}'.format(quantile))
x_sparse = scipy.sparse.lil_matrix(x.shape, dtype=x.dtype)
mags = np.abs(x)
norms = np.sum(mags, axis=1, keepdims=True)
mag_sort = np.sort(mags, axis=1)
cumulative_mag = np.cumsum(mag_sort / norms, axis=1)
threshold_idx = np.argmin(cumulative_mag < quantile, axis=1)
for i, j in enumerate(threshold_idx):
idx = np.where(mags[i] >= mag_sort[i, j])
x_sparse[i, idx] = x[i, idx]
return x_sparse.tocsr() | Return a row-sparse matrix approximating the input `x`.
Parameters
----------
x : np.ndarray [ndim <= 2]
The input matrix to sparsify.
quantile : float in [0, 1.0)
Percentage of magnitude to discard in each row of `x`
Returns
-------
x_sparse : `scipy.sparse.csr_matrix` [shape=x.shape]
Row-sparsified approximation of `x`
If `x.ndim == 1`, then `x` is interpreted as a row vector,
and `x_sparse.shape == (1, len(x))`.
Raises
------
ParameterError
If `x.ndim > 2`
If `quantile` lies outside `[0, 1.0)`
Notes
-----
This function caches at level 40.
Examples
--------
>>> # Construct a Hann window to sparsify
>>> x = scipy.signal.hann(32)
>>> x
array([ 0. , 0.01 , 0.041, 0.09 , 0.156, 0.236, 0.326,
0.424, 0.525, 0.625, 0.72 , 0.806, 0.879, 0.937,
0.977, 0.997, 0.997, 0.977, 0.937, 0.879, 0.806,
0.72 , 0.625, 0.525, 0.424, 0.326, 0.236, 0.156,
0.09 , 0.041, 0.01 , 0. ])
>>> # Discard the bottom percentile
>>> x_sparse = librosa.util.sparsify_rows(x, quantile=0.01)
>>> x_sparse
<1x32 sparse matrix of type '<type 'numpy.float64'>'
with 26 stored elements in Compressed Sparse Row format>
>>> x_sparse.todense()
matrix([[ 0. , 0. , 0. , 0.09 , 0.156, 0.236, 0.326,
0.424, 0.525, 0.625, 0.72 , 0.806, 0.879, 0.937,
0.977, 0.997, 0.997, 0.977, 0.937, 0.879, 0.806,
0.72 , 0.625, 0.525, 0.424, 0.326, 0.236, 0.156,
0.09 , 0. , 0. , 0. ]])
>>> # Discard up to the bottom 10th percentile
>>> x_sparse = librosa.util.sparsify_rows(x, quantile=0.1)
>>> x_sparse
<1x32 sparse matrix of type '<type 'numpy.float64'>'
with 20 stored elements in Compressed Sparse Row format>
>>> x_sparse.todense()
matrix([[ 0. , 0. , 0. , 0. , 0. , 0. , 0.326,
0.424, 0.525, 0.625, 0.72 , 0.806, 0.879, 0.937,
0.977, 0.997, 0.997, 0.977, 0.937, 0.879, 0.806,
0.72 , 0.625, 0.525, 0.424, 0.326, 0. , 0. ,
0. , 0. , 0. , 0. ]]) | Below is the instruction that describes the task:
### Input:
Return a row-sparse matrix approximating the input `x`.
Parameters
----------
x : np.ndarray [ndim <= 2]
The input matrix to sparsify.
quantile : float in [0, 1.0)
Percentage of magnitude to discard in each row of `x`
Returns
-------
x_sparse : `scipy.sparse.csr_matrix` [shape=x.shape]
Row-sparsified approximation of `x`
If `x.ndim == 1`, then `x` is interpreted as a row vector,
and `x_sparse.shape == (1, len(x))`.
Raises
------
ParameterError
If `x.ndim > 2`
If `quantile` lies outside `[0, 1.0)`
Notes
-----
This function caches at level 40.
Examples
--------
>>> # Construct a Hann window to sparsify
>>> x = scipy.signal.hann(32)
>>> x
array([ 0. , 0.01 , 0.041, 0.09 , 0.156, 0.236, 0.326,
0.424, 0.525, 0.625, 0.72 , 0.806, 0.879, 0.937,
0.977, 0.997, 0.997, 0.977, 0.937, 0.879, 0.806,
0.72 , 0.625, 0.525, 0.424, 0.326, 0.236, 0.156,
0.09 , 0.041, 0.01 , 0. ])
>>> # Discard the bottom percentile
>>> x_sparse = librosa.util.sparsify_rows(x, quantile=0.01)
>>> x_sparse
<1x32 sparse matrix of type '<type 'numpy.float64'>'
with 26 stored elements in Compressed Sparse Row format>
>>> x_sparse.todense()
matrix([[ 0. , 0. , 0. , 0.09 , 0.156, 0.236, 0.326,
0.424, 0.525, 0.625, 0.72 , 0.806, 0.879, 0.937,
0.977, 0.997, 0.997, 0.977, 0.937, 0.879, 0.806,
0.72 , 0.625, 0.525, 0.424, 0.326, 0.236, 0.156,
0.09 , 0. , 0. , 0. ]])
>>> # Discard up to the bottom 10th percentile
>>> x_sparse = librosa.util.sparsify_rows(x, quantile=0.1)
>>> x_sparse
<1x32 sparse matrix of type '<type 'numpy.float64'>'
with 20 stored elements in Compressed Sparse Row format>
>>> x_sparse.todense()
matrix([[ 0. , 0. , 0. , 0. , 0. , 0. , 0.326,
0.424, 0.525, 0.625, 0.72 , 0.806, 0.879, 0.937,
0.977, 0.997, 0.997, 0.977, 0.937, 0.879, 0.806,
0.72 , 0.625, 0.525, 0.424, 0.326, 0. , 0. ,
0. , 0. , 0. , 0. ]])
### Response:
def sparsify_rows(x, quantile=0.01):
'''
Return a row-sparse matrix approximating the input `x`.
Parameters
----------
x : np.ndarray [ndim <= 2]
The input matrix to sparsify.
quantile : float in [0, 1.0)
Percentage of magnitude to discard in each row of `x`
Returns
-------
x_sparse : `scipy.sparse.csr_matrix` [shape=x.shape]
Row-sparsified approximation of `x`
If `x.ndim == 1`, then `x` is interpreted as a row vector,
and `x_sparse.shape == (1, len(x))`.
Raises
------
ParameterError
If `x.ndim > 2`
If `quantile` lies outside `[0, 1.0)`
Notes
-----
This function caches at level 40.
Examples
--------
>>> # Construct a Hann window to sparsify
>>> x = scipy.signal.hann(32)
>>> x
array([ 0. , 0.01 , 0.041, 0.09 , 0.156, 0.236, 0.326,
0.424, 0.525, 0.625, 0.72 , 0.806, 0.879, 0.937,
0.977, 0.997, 0.997, 0.977, 0.937, 0.879, 0.806,
0.72 , 0.625, 0.525, 0.424, 0.326, 0.236, 0.156,
0.09 , 0.041, 0.01 , 0. ])
>>> # Discard the bottom percentile
>>> x_sparse = librosa.util.sparsify_rows(x, quantile=0.01)
>>> x_sparse
<1x32 sparse matrix of type '<type 'numpy.float64'>'
with 26 stored elements in Compressed Sparse Row format>
>>> x_sparse.todense()
matrix([[ 0. , 0. , 0. , 0.09 , 0.156, 0.236, 0.326,
0.424, 0.525, 0.625, 0.72 , 0.806, 0.879, 0.937,
0.977, 0.997, 0.997, 0.977, 0.937, 0.879, 0.806,
0.72 , 0.625, 0.525, 0.424, 0.326, 0.236, 0.156,
0.09 , 0. , 0. , 0. ]])
>>> # Discard up to the bottom 10th percentile
>>> x_sparse = librosa.util.sparsify_rows(x, quantile=0.1)
>>> x_sparse
<1x32 sparse matrix of type '<type 'numpy.float64'>'
with 20 stored elements in Compressed Sparse Row format>
>>> x_sparse.todense()
matrix([[ 0. , 0. , 0. , 0. , 0. , 0. , 0.326,
0.424, 0.525, 0.625, 0.72 , 0.806, 0.879, 0.937,
0.977, 0.997, 0.997, 0.977, 0.937, 0.879, 0.806,
0.72 , 0.625, 0.525, 0.424, 0.326, 0. , 0. ,
0. , 0. , 0. , 0. ]])
'''
if x.ndim == 1:
x = x.reshape((1, -1))
elif x.ndim > 2:
raise ParameterError('Input must have 2 or fewer dimensions. '
'Provided x.shape={}.'.format(x.shape))
if not 0.0 <= quantile < 1:
raise ParameterError('Invalid quantile {:.2f}'.format(quantile))
x_sparse = scipy.sparse.lil_matrix(x.shape, dtype=x.dtype)
mags = np.abs(x)
norms = np.sum(mags, axis=1, keepdims=True)
mag_sort = np.sort(mags, axis=1)
cumulative_mag = np.cumsum(mag_sort / norms, axis=1)
threshold_idx = np.argmin(cumulative_mag < quantile, axis=1)
for i, j in enumerate(threshold_idx):
idx = np.where(mags[i] >= mag_sort[i, j])
x_sparse[i, idx] = x[i, idx]
return x_sparse.tocsr() |
def get_access_token_from_code(
self, code, redirect_uri, app_id, app_secret
):
"""Get an access token from the "code" returned from an OAuth dialog.
Returns a dict containing the user-specific access token and its
expiration date (if applicable).
"""
args = {
"code": code,
"redirect_uri": redirect_uri,
"client_id": app_id,
"client_secret": app_secret,
}
return self.request(
"{0}/oauth/access_token".format(self.version), args
) | Get an access token from the "code" returned from an OAuth dialog.
Returns a dict containing the user-specific access token and its
expiration date (if applicable). | Below is the instruction that describes the task:
### Input:
Get an access token from the "code" returned from an OAuth dialog.
Returns a dict containing the user-specific access token and its
expiration date (if applicable).
### Response:
def get_access_token_from_code(
self, code, redirect_uri, app_id, app_secret
):
"""Get an access token from the "code" returned from an OAuth dialog.
Returns a dict containing the user-specific access token and its
expiration date (if applicable).
"""
args = {
"code": code,
"redirect_uri": redirect_uri,
"client_id": app_id,
"client_secret": app_secret,
}
return self.request(
"{0}/oauth/access_token".format(self.version), args
) |
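A hypothetical exchange, assuming the usual facebook-sdk layout (import path, class name) and using placeholder credentials:

from facebook import GraphAPI  # import path is an assumption about the surrounding package

graph = GraphAPI()  # an unauthenticated client is enough for the code exchange
token_info = graph.get_access_token_from_code(
    code='<code from the OAuth redirect>',
    redirect_uri='https://example.com/auth/callback',
    app_id='<app id>',
    app_secret='<app secret>')
# token_info carries the user access token returned by the /oauth/access_token endpoint.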
def entry_point(__func: Callable) -> Callable:
"""Execute function when module is run directly.
Note:
This allows fall through for importing modules that use it.
Args:
__func: Function to run
"""
if __func.__module__ == '__main__':
import sys
sys.exit(__func())
else:
return __func | Execute function when module is run directly.
Note:
This allows fall through for importing modules that use it.
Args:
__func: Function to run | Below is the instruction that describes the task:
### Input:
Execute function when module is run directly.
Note:
This allows fall through for importing modules that use it.
Args:
__func: Function to run
### Response:
def entry_point(__func: Callable) -> Callable:
"""Execute function when module is run directly.
Note:
This allows fall through for importing modules that use it.
Args:
__func: Function to run
"""
if __func.__module__ == '__main__':
import sys
sys.exit(__func())
else:
return __func |
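Typical use as a decorator in a script (a sketch, with a made-up main):

@entry_point
def main():
    print("doing work")
    return 0

When the file is executed directly the decorator calls sys.exit(main()) at definition time, so the return value becomes the process exit status; when the file is imported, main is returned unchanged.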
def build_duration(self):
"""Return the difference between build and build_done states"""
return int(self.state.build_done) - int(self.state.build) | Return the difference between build and build_done states | Below is the instruction that describes the task:
### Input:
Return the difference between build and build_done states
### Response:
def build_duration(self):
"""Return the difference between build and build_done states"""
return int(self.state.build_done) - int(self.state.build) |
def set_default_format_options(self, format_options, read=False):
"""Set default format option"""
if self.default_notebook_metadata_filter:
format_options.setdefault('notebook_metadata_filter', self.default_notebook_metadata_filter)
if self.default_cell_metadata_filter:
format_options.setdefault('cell_metadata_filter', self.default_cell_metadata_filter)
if self.comment_magics is not None:
format_options.setdefault('comment_magics', self.comment_magics)
if self.split_at_heading:
format_options.setdefault('split_at_heading', self.split_at_heading)
if not read and self.default_cell_markers:
format_options.setdefault('cell_markers', self.default_cell_markers)
if read and self.sphinx_convert_rst2md:
format_options.setdefault('rst2md', self.sphinx_convert_rst2md) | Set default format option | Below is the instruction that describes the task:
### Input:
Set default format option
### Response:
def set_default_format_options(self, format_options, read=False):
"""Set default format option"""
if self.default_notebook_metadata_filter:
format_options.setdefault('notebook_metadata_filter', self.default_notebook_metadata_filter)
if self.default_cell_metadata_filter:
format_options.setdefault('cell_metadata_filter', self.default_cell_metadata_filter)
if self.comment_magics is not None:
format_options.setdefault('comment_magics', self.comment_magics)
if self.split_at_heading:
format_options.setdefault('split_at_heading', self.split_at_heading)
if not read and self.default_cell_markers:
format_options.setdefault('cell_markers', self.default_cell_markers)
if read and self.sphinx_convert_rst2md:
format_options.setdefault('rst2md', self.sphinx_convert_rst2md) |
def suggest(q='', results=15, buckets=None, limit=False, max_familiarity=None, min_familiarity=None,
max_hotttnesss=None, min_hotttnesss=None):
"""Suggest artists based upon partial names.
Args:
Kwargs:
q (str): The text to suggest artists from
results (int): An integer number of results to return
buckets (list): A list of strings specifying which buckets to retrieve
limit (bool): A boolean indicating whether or not to limit the results to one of the id spaces specified in buckets
max_familiarity (float): A float specifying the max familiarity of artists to search for
min_familiarity (float): A float specifying the min familiarity of artists to search for
max_hotttnesss (float): A float specifying the max hotttnesss of artists to search for
min_hotttnesss (float): A float specifying the max hotttnesss of artists to search for
Returns:
A list of Artist objects
Example:
>>> results = artist.suggest(q='rad')
>>> results
>>>
"""
buckets = buckets or []
kwargs = {}
kwargs['q'] = q
if max_familiarity is not None:
kwargs['max_familiarity'] = max_familiarity
if min_familiarity is not None:
kwargs['min_familiarity'] = min_familiarity
if max_hotttnesss is not None:
kwargs['max_hotttnesss'] = max_hotttnesss
if min_hotttnesss is not None:
kwargs['min_hotttnesss'] = min_hotttnesss
if results:
kwargs['results'] = results
if buckets:
kwargs['bucket'] = buckets
if limit:
kwargs['limit'] = 'true'
result = util.callm("%s/%s" % ('artist', 'suggest'), kwargs)
return [Artist(**util.fix(a_dict)) for a_dict in result['response']['artists']] | Suggest artists based upon partial names.
Args:
Kwargs:
q (str): The text to suggest artists from
results (int): An integer number of results to return
buckets (list): A list of strings specifying which buckets to retrieve
limit (bool): A boolean indicating whether or not to limit the results to one of the id spaces specified in buckets
max_familiarity (float): A float specifying the max familiarity of artists to search for
min_familiarity (float): A float specifying the min familiarity of artists to search for
max_hotttnesss (float): A float specifying the max hotttnesss of artists to search for
min_hotttnesss (float): A float specifying the max hotttnesss of artists to search for
Returns:
A list of Artist objects
Example:
>>> results = artist.suggest(q='rad')
>>> results
>>> | Below is the instruction that describes the task:
### Input:
Suggest artists based upon partial names.
Args:
Kwargs:
q (str): The text to suggest artists from
results (int): An integer number of results to return
buckets (list): A list of strings specifying which buckets to retrieve
limit (bool): A boolean indicating whether or not to limit the results to one of the id spaces specified in buckets
max_familiarity (float): A float specifying the max familiarity of artists to search for
min_familiarity (float): A float specifying the min familiarity of artists to search for
max_hotttnesss (float): A float specifying the max hotttnesss of artists to search for
min_hotttnesss (float): A float specifying the max hotttnesss of artists to search for
Returns:
A list of Artist objects
Example:
>>> results = artist.suggest(text='rad')
>>> results
>>>
### Response:
def suggest(q='', results=15, buckets=None, limit=False, max_familiarity=None, min_familiarity=None,
max_hotttnesss=None, min_hotttnesss=None):
"""Suggest artists based upon partial names.
Args:
Kwargs:
q (str): The text to suggest artists from
results (int): An integer number of results to return
buckets (list): A list of strings specifying which buckets to retrieve
limit (bool): A boolean indicating whether or not to limit the results to one of the id spaces specified in buckets
max_familiarity (float): A float specifying the max familiarity of artists to search for
min_familiarity (float): A float specifying the min familiarity of artists to search for
max_hotttnesss (float): A float specifying the max hotttnesss of artists to search for
min_hotttnesss (float): A float specifying the max hotttnesss of artists to search for
Returns:
A list of Artist objects
Example:
>>> results = artist.suggest(q='rad')
>>> results
>>>
"""
buckets = buckets or []
kwargs = {}
kwargs['q'] = q
if max_familiarity is not None:
kwargs['max_familiarity'] = max_familiarity
if min_familiarity is not None:
kwargs['min_familiarity'] = min_familiarity
if max_hotttnesss is not None:
kwargs['max_hotttnesss'] = max_hotttnesss
if min_hotttnesss is not None:
kwargs['min_hotttnesss'] = min_hotttnesss
if results:
kwargs['results'] = results
if buckets:
kwargs['bucket'] = buckets
if limit:
kwargs['limit'] = 'true'
result = util.callm("%s/%s" % ('artist', 'suggest'), kwargs)
return [Artist(**util.fix(a_dict)) for a_dict in result['response']['artists']] |
def annotation_rows(prefix, annotations):
"""
Helper function to extract N: and C: rows from annotations and pad their values
"""
ncol = len(annotations['Column Name'])
return {name.replace(prefix, '', 1) : values + [''] * (ncol - len(values))
for name, values in annotations.items() if name.startswith(prefix)} | Helper function to extract N: and C: rows from annotations and pad their values | Below is the instruction that describes the task:
### Input:
Helper function to extract N: and C: rows from annotations and pad their values
### Response:
def annotation_rows(prefix, annotations):
"""
Helper function to extract N: and C: rows from annotations and pad their values
"""
ncol = len(annotations['Column Name'])
return {name.replace(prefix, '', 1) : values + [''] * (ncol - len(values))
for name, values in annotations.items() if name.startswith(prefix)} |
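A toy input showing the padding behaviour (keys and values are made up):

annotations = {
    'Column Name': ['id', 'value', 'note'],
    'N:score': [1.0, 2.0],
    'C:flag': ['x'],
}
print(annotation_rows('N:', annotations))   # {'score': [1.0, 2.0, '']}
print(annotation_rows('C:', annotations))   # {'flag': ['x', '', '']}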
def reply_ok(self):
"""Return True if this is a reply and its first argument is 'ok'."""
return (self.mtype == self.REPLY and self.arguments and
self.arguments[0] == self.OK) | Return True if this is a reply and its first argument is 'ok'. | Below is the instruction that describes the task:
### Input:
Return True if this is a reply and its first argument is 'ok'.
### Response:
def reply_ok(self):
"""Return True if this is a reply and its first argument is 'ok'."""
return (self.mtype == self.REPLY and self.arguments and
self.arguments[0] == self.OK) |
def mark(self):
'''Mark the line and column information of the result of this parser.'''
def pos(text, index):
return ParseError.loc_info(text, index)
@Parser
def mark_parser(text, index):
res = self(text, index)
if res.status:
return Value.success(res.index, (pos(text, index), res.value, pos(text, res.index)))
else:
return res # failed.
return mark_parser | Mark the line and column information of the result of this parser. | Below is the instruction that describes the task:
### Input:
Mark the line and column information of the result of this parser.
### Response:
def mark(self):
'''Mark the line and column information of the result of this parser.'''
def pos(text, index):
return ParseError.loc_info(text, index)
@Parser
def mark_parser(text, index):
res = self(text, index)
if res.status:
return Value.success(res.index, (pos(text, index), res.value, pos(text, res.index)))
else:
return res # failed.
return mark_parser |
def sample(self, withReplacement=None, fraction=None, seed=None):
"""Returns a sampled subset of this :class:`DataFrame`.
:param withReplacement: Sample with replacement or not (default False).
:param fraction: Fraction of rows to generate, range [0.0, 1.0].
:param seed: Seed for sampling (default a random seed).
.. note:: This is not guaranteed to provide exactly the fraction specified of the total
count of the given :class:`DataFrame`.
.. note:: `fraction` is required; `withReplacement` and `seed` are optional.
>>> df = spark.range(10)
>>> df.sample(0.5, 3).count()
7
>>> df.sample(fraction=0.5, seed=3).count()
7
>>> df.sample(withReplacement=True, fraction=0.5, seed=3).count()
1
>>> df.sample(1.0).count()
10
>>> df.sample(fraction=1.0).count()
10
>>> df.sample(False, fraction=1.0).count()
10
"""
# For the cases below:
# sample(True, 0.5 [, seed])
# sample(True, fraction=0.5 [, seed])
# sample(withReplacement=False, fraction=0.5 [, seed])
is_withReplacement_set = \
type(withReplacement) == bool and isinstance(fraction, float)
# For the case below:
# sample(fraction=0.5 [, seed])
is_withReplacement_omitted_kwargs = \
withReplacement is None and isinstance(fraction, float)
# For the case below:
# sample(0.5 [, seed])
is_withReplacement_omitted_args = isinstance(withReplacement, float)
if not (is_withReplacement_set
or is_withReplacement_omitted_kwargs
or is_withReplacement_omitted_args):
argtypes = [
str(type(arg)) for arg in [withReplacement, fraction, seed] if arg is not None]
raise TypeError(
"withReplacement (optional), fraction (required) and seed (optional)"
" should be a bool, float and number; however, "
"got [%s]." % ", ".join(argtypes))
if is_withReplacement_omitted_args:
if fraction is not None:
seed = fraction
fraction = withReplacement
withReplacement = None
seed = long(seed) if seed is not None else None
args = [arg for arg in [withReplacement, fraction, seed] if arg is not None]
jdf = self._jdf.sample(*args)
return DataFrame(jdf, self.sql_ctx) | Returns a sampled subset of this :class:`DataFrame`.
:param withReplacement: Sample with replacement or not (default False).
:param fraction: Fraction of rows to generate, range [0.0, 1.0].
:param seed: Seed for sampling (default a random seed).
.. note:: This is not guaranteed to provide exactly the fraction specified of the total
count of the given :class:`DataFrame`.
.. note:: `fraction` is required; `withReplacement` and `seed` are optional.
>>> df = spark.range(10)
>>> df.sample(0.5, 3).count()
7
>>> df.sample(fraction=0.5, seed=3).count()
7
>>> df.sample(withReplacement=True, fraction=0.5, seed=3).count()
1
>>> df.sample(1.0).count()
10
>>> df.sample(fraction=1.0).count()
10
>>> df.sample(False, fraction=1.0).count()
10 | Below is the instruction that describes the task:
### Input:
Returns a sampled subset of this :class:`DataFrame`.
:param withReplacement: Sample with replacement or not (default False).
:param fraction: Fraction of rows to generate, range [0.0, 1.0].
:param seed: Seed for sampling (default a random seed).
.. note:: This is not guaranteed to provide exactly the fraction specified of the total
count of the given :class:`DataFrame`.
.. note:: `fraction` is required; `withReplacement` and `seed` are optional.
>>> df = spark.range(10)
>>> df.sample(0.5, 3).count()
7
>>> df.sample(fraction=0.5, seed=3).count()
7
>>> df.sample(withReplacement=True, fraction=0.5, seed=3).count()
1
>>> df.sample(1.0).count()
10
>>> df.sample(fraction=1.0).count()
10
>>> df.sample(False, fraction=1.0).count()
10
### Response:
def sample(self, withReplacement=None, fraction=None, seed=None):
"""Returns a sampled subset of this :class:`DataFrame`.
:param withReplacement: Sample with replacement or not (default False).
:param fraction: Fraction of rows to generate, range [0.0, 1.0].
:param seed: Seed for sampling (default a random seed).
.. note:: This is not guaranteed to provide exactly the fraction specified of the total
count of the given :class:`DataFrame`.
.. note:: `fraction` is required; `withReplacement` and `seed` are optional.
>>> df = spark.range(10)
>>> df.sample(0.5, 3).count()
7
>>> df.sample(fraction=0.5, seed=3).count()
7
>>> df.sample(withReplacement=True, fraction=0.5, seed=3).count()
1
>>> df.sample(1.0).count()
10
>>> df.sample(fraction=1.0).count()
10
>>> df.sample(False, fraction=1.0).count()
10
"""
# For the cases below:
# sample(True, 0.5 [, seed])
# sample(True, fraction=0.5 [, seed])
# sample(withReplacement=False, fraction=0.5 [, seed])
is_withReplacement_set = \
type(withReplacement) == bool and isinstance(fraction, float)
# For the case below:
# sample(fraction=0.5 [, seed])
is_withReplacement_omitted_kwargs = \
withReplacement is None and isinstance(fraction, float)
# For the case below:
# sample(0.5 [, seed])
is_withReplacement_omitted_args = isinstance(withReplacement, float)
if not (is_withReplacement_set
or is_withReplacement_omitted_kwargs
or is_withReplacement_omitted_args):
argtypes = [
str(type(arg)) for arg in [withReplacement, fraction, seed] if arg is not None]
raise TypeError(
"withReplacement (optional), fraction (required) and seed (optional)"
" should be a bool, float and number; however, "
"got [%s]." % ", ".join(argtypes))
if is_withReplacement_omitted_args:
if fraction is not None:
seed = fraction
fraction = withReplacement
withReplacement = None
seed = long(seed) if seed is not None else None
args = [arg for arg in [withReplacement, fraction, seed] if arg is not None]
jdf = self._jdf.sample(*args)
return DataFrame(jdf, self.sql_ctx) |