code (string, 75-104k chars) | docstring (string, 1-46.9k chars) | text (string, 164-112k chars)
---|---|---|
def depth(self, value):
""" Update ourself and any of our subcommands. """
for command in self.subcommands.values():
command.depth = value + 1
del command.argparser._defaults[self.arg_label_fmt % self._depth]
command.argparser._defaults[self.arg_label_fmt % value] = command
self._depth = value | Update ourself and any of our subcommands. | Below is the instruction that describes the task:
### Input:
Update ourself and any of our subcommands.
### Response:
def depth(self, value):
""" Update ourself and any of our subcommands. """
for command in self.subcommands.values():
command.depth = value + 1
del command.argparser._defaults[self.arg_label_fmt % self._depth]
command.argparser._defaults[self.arg_label_fmt % value] = command
self._depth = value |
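The depth setter above pushes a new depth down to every subcommand while re-keying the argparser default that stores each command; a minimal, self-contained sketch of that propagation pattern (the Node class and its attributes are hypothetical, not the original command framework):

class Node:
    def __init__(self, name, children=()):
        self.name = name
        self.children = list(children)
        self._depth = 0

    @property
    def depth(self):
        return self._depth

    @depth.setter
    def depth(self, value):
        # Give every child a depth one greater than ours before recording our own.
        for child in self.children:
            child.depth = value + 1
        self._depth = value

root = Node("root", [Node("a", [Node("b")])])
root.depth = 3
print(root.children[0].depth, root.children[0].children[0].depth)  # 4 5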
def load(self, filepath):
# type: (str) -> None
"""Load configuration from existing file.
:param str filepath: Path to existing config file.
:raises: ValueError if supplied config file is invalid.
"""
try:
self._config.read(filepath)
import ast
self.connection.timeout = \
self._config.getint("Connection", "timeout")
self.connection.verify = \
self._config.getboolean("Connection", "verify")
self.connection.cert = \
self._config.get("Connection", "cert")
self.proxies.proxies = \
ast.literal_eval(self._config.get("Proxies", "proxies"))
self.proxies.use_env_settings = \
self._config.getboolean("Proxies", "env_settings")
self.redirect_policy.allow = \
self._config.getboolean("RedirectPolicy", "allow")
self.redirect_policy.max_redirects = \
self._config.getint("RedirectPolicy", "max_redirects")
except (ValueError, EnvironmentError, NoOptionError):
error = "Supplied config file incompatible."
raise_with_traceback(ValueError, error)
finally:
self._clear_config() | Load configuration from existing file.
:param str filepath: Path to existing config file.
:raises: ValueError if supplied config file is invalid. | Below is the instruction that describes the task:
### Input:
Load configuration from existing file.
:param str filepath: Path to existing config file.
:raises: ValueError if supplied config file is invalid.
### Response:
def load(self, filepath):
# type: (str) -> None
"""Load configuration from existing file.
:param str filepath: Path to existing config file.
:raises: ValueError if supplied config file is invalid.
"""
try:
self._config.read(filepath)
import ast
self.connection.timeout = \
self._config.getint("Connection", "timeout")
self.connection.verify = \
self._config.getboolean("Connection", "verify")
self.connection.cert = \
self._config.get("Connection", "cert")
self.proxies.proxies = \
ast.literal_eval(self._config.get("Proxies", "proxies"))
self.proxies.use_env_settings = \
self._config.getboolean("Proxies", "env_settings")
self.redirect_policy.allow = \
self._config.getboolean("RedirectPolicy", "allow")
self.redirect_policy.max_redirects = \
self._config.getint("RedirectPolicy", "max_redirects")
except (ValueError, EnvironmentError, NoOptionError):
error = "Supplied config file incompatible."
raise_with_traceback(ValueError, error)
finally:
self._clear_config() |
def queryModelIDs(self):
"""Queuries DB for model IDs of all currently instantiated models
associated with this HyperSearch job.
See also: _iterModels()
Parameters:
----------------------------------------------------------------------
retval: A sequence of Nupic modelIDs
"""
jobID = self.getJobID()
modelCounterPairs = _clientJobsDB().modelsGetUpdateCounters(jobID)
modelIDs = tuple(x[0] for x in modelCounterPairs)
return modelIDs | Queries DB for model IDs of all currently instantiated models
associated with this HyperSearch job.
See also: _iterModels()
Parameters:
----------------------------------------------------------------------
retval: A sequence of Nupic modelIDs | Below is the instruction that describes the task:
### Input:
Queries DB for model IDs of all currently instantiated models
associated with this HyperSearch job.
See also: _iterModels()
Parameters:
----------------------------------------------------------------------
retval: A sequence of Nupic modelIDs
### Response:
def queryModelIDs(self):
"""Queuries DB for model IDs of all currently instantiated models
associated with this HyperSearch job.
See also: _iterModels()
Parameters:
----------------------------------------------------------------------
retval: A sequence of Nupic modelIDs
"""
jobID = self.getJobID()
modelCounterPairs = _clientJobsDB().modelsGetUpdateCounters(jobID)
modelIDs = tuple(x[0] for x in modelCounterPairs)
return modelIDs |
def checkppolicy(self, **params):
""" search user page """
self._check_auth(must_admin=False, redir_login=False)
keys = list(params.keys())
if len(keys) != 1:
cherrypy.response.status = 400
return "bad argument"
password = params[keys[0]]
is_admin = self._check_admin()
ret = self._checkppolicy(password)
if ret['match']:
cherrypy.response.status = 200
else:
cherrypy.response.status = 200
return json.dumps(ret, separators=(',', ':')) | search user page | Below is the instruction that describes the task:
### Input:
search user page
### Response:
def checkppolicy(self, **params):
""" search user page """
self._check_auth(must_admin=False, redir_login=False)
keys = list(params.keys())
if len(keys) != 1:
cherrypy.response.status = 400
return "bad argument"
password = params[keys[0]]
is_admin = self._check_admin()
ret = self._checkppolicy(password)
if ret['match']:
cherrypy.response.status = 200
else:
cherrypy.response.status = 200
return json.dumps(ret, separators=(',', ':')) |
def metrics(self, *metrics):
""" Add a list of Metric ingredients to the query. These can either be
Metric objects or strings representing metrics on the shelf.
The Metric expression will be added to the query's select statement.
The metric value is a property of each row of the result.
:param metrics: Metrics to add to the recipe. Metrics can
either be keys on the ``shelf`` or
Metric objects
:type metrics: list
"""
for m in metrics:
self._cauldron.use(self._shelf.find(m, Metric))
self.dirty = True
return self | Add a list of Metric ingredients to the query. These can either be
Metric objects or strings representing metrics on the shelf.
The Metric expression will be added to the query's select statement.
The metric value is a property of each row of the result.
:param metrics: Metrics to add to the recipe. Metrics can
either be keys on the ``shelf`` or
Metric objects
:type metrics: list | Below is the instruction that describes the task:
### Input:
Add a list of Metric ingredients to the query. These can either be
Metric objects or strings representing metrics on the shelf.
The Metric expression will be added to the query's select statement.
The metric value is a property of each row of the result.
:param metrics: Metrics to add to the recipe. Metrics can
either be keys on the ``shelf`` or
Metric objects
:type metrics: list
### Response:
def metrics(self, *metrics):
""" Add a list of Metric ingredients to the query. These can either be
Metric objects or strings representing metrics on the shelf.
The Metric expression will be added to the query's select statement.
The metric value is a property of each row of the result.
:param metrics: Metrics to add to the recipe. Metrics can
either be keys on the ``shelf`` or
Metric objects
:type metrics: list
"""
for m in metrics:
self._cauldron.use(self._shelf.find(m, Metric))
self.dirty = True
return self |
def get_restore_path(self, status=None):
""" get_restore_path: get path to restoration file
Args:
status (str): step to get restore file (optional)
Returns: string path to restoration file
"""
status = self.get_status() if status is None else status
return config.get_restore_path(status.name.lower()) | get_restore_path: get path to restoration file
Args:
status (str): step to get restore file (optional)
Returns: string path to restoration file | Below is the instruction that describes the task:
### Input:
get_restore_path: get path to restoration file
Args:
status (str): step to get restore file (optional)
Returns: string path to restoration file
### Response:
def get_restore_path(self, status=None):
""" get_restore_path: get path to restoration file
Args:
status (str): step to get restore file (optional)
Returns: string path to restoration file
"""
status = self.get_status() if status is None else status
return config.get_restore_path(status.name.lower()) |
def patch_cluster_custom_object_status(self, group, version, plural, name, body, **kwargs): # noqa: E501
"""patch_cluster_custom_object_status # noqa: E501
partially update status of the specified cluster scoped custom object # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_cluster_custom_object_status(group, version, plural, name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str group: the custom resource's group (required)
:param str version: the custom resource's version (required)
:param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
:param str name: the custom object's name (required)
:param UNKNOWN_BASE_TYPE body: (required)
:return: object
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_cluster_custom_object_status_with_http_info(group, version, plural, name, body, **kwargs) # noqa: E501
else:
(data) = self.patch_cluster_custom_object_status_with_http_info(group, version, plural, name, body, **kwargs) # noqa: E501
return data | patch_cluster_custom_object_status # noqa: E501
partially update status of the specified cluster scoped custom object # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_cluster_custom_object_status(group, version, plural, name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str group: the custom resource's group (required)
:param str version: the custom resource's version (required)
:param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
:param str name: the custom object's name (required)
:param UNKNOWN_BASE_TYPE body: (required)
:return: object
If the method is called asynchronously,
returns the request thread. | Below is the instruction that describes the task:
### Input:
patch_cluster_custom_object_status # noqa: E501
partially update status of the specified cluster scoped custom object # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_cluster_custom_object_status(group, version, plural, name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str group: the custom resource's group (required)
:param str version: the custom resource's version (required)
:param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
:param str name: the custom object's name (required)
:param UNKNOWN_BASE_TYPE body: (required)
:return: object
If the method is called asynchronously,
returns the request thread.
### Response:
def patch_cluster_custom_object_status(self, group, version, plural, name, body, **kwargs): # noqa: E501
"""patch_cluster_custom_object_status # noqa: E501
partially update status of the specified cluster scoped custom object # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_cluster_custom_object_status(group, version, plural, name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str group: the custom resource's group (required)
:param str version: the custom resource's version (required)
:param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
:param str name: the custom object's name (required)
:param UNKNOWN_BASE_TYPE body: (required)
:return: object
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_cluster_custom_object_status_with_http_info(group, version, plural, name, body, **kwargs) # noqa: E501
else:
(data) = self.patch_cluster_custom_object_status_with_http_info(group, version, plural, name, body, **kwargs) # noqa: E501
return data |
def persistent_attributes(self, persistent_attributes):
# type: (Dict[str, object]) -> None
"""Overwrites and caches the persistent attributes value.
Note that the persistent attributes will not be saved to
persistence layer until the save_persistent_attributes method
is called.
:param persistent_attributes: attributes in persistence layer
:type persistent_attributes: Dict[str, object]
:raises: :py:class:`ask_sdk_core.exceptions.AttributesManagerException`
if trying to set persistent attributes without persistence adapter
"""
if not self._persistence_adapter:
raise AttributesManagerException(
"Cannot set PersistentAttributes without persistence adapter!")
self._persistence_attributes = persistent_attributes
self._persistent_attributes_set = True | Overwrites and caches the persistent attributes value.
Note that the persistent attributes will not be saved to
persistence layer until the save_persistent_attributes method
is called.
:param persistent_attributes: attributes in persistence layer
:type persistent_attributes: Dict[str, object]
:raises: :py:class:`ask_sdk_core.exceptions.AttributesManagerException`
if trying to set persistent attributes without persistence adapter | Below is the instruction that describes the task:
### Input:
Overwrites and caches the persistent attributes value.
Note that the persistent attributes will not be saved to
persistence layer until the save_persistent_attributes method
is called.
:param persistent_attributes: attributes in persistence layer
:type persistent_attributes: Dict[str, object]
:raises: :py:class:`ask_sdk_core.exceptions.AttributesManagerException`
if trying to set persistent attributes without persistence adapter
### Response:
def persistent_attributes(self, persistent_attributes):
# type: (Dict[str, object]) -> None
"""Overwrites and caches the persistent attributes value.
Note that the persistent attributes will not be saved to
persistence layer until the save_persistent_attributes method
is called.
:param persistent_attributes: attributes in persistence layer
:type persistent_attributes: Dict[str, object]
:raises: :py:class:`ask_sdk_core.exceptions.AttributesManagerException`
if trying to set persistent attributes without persistence adapter
"""
if not self._persistence_adapter:
raise AttributesManagerException(
"Cannot set PersistentAttributes without persistence adapter!")
self._persistence_attributes = persistent_attributes
self._persistent_attributes_set = True |
def parsed_aggregate_reports_to_csv(reports):
"""
Converts one or more parsed aggregate reports to flat CSV format, including
headers
Args:
reports: A parsed aggregate report or list of parsed aggregate reports
Returns:
str: Parsed aggregate report data in flat CSV format, including headers
"""
def to_str(obj):
return str(obj).lower()
fields = ["xml_schema", "org_name", "org_email",
"org_extra_contact_info", "report_id", "begin_date", "end_date",
"errors", "domain", "adkim", "aspf", "p", "sp", "pct", "fo",
"source_ip_address", "source_country", "source_reverse_dns",
"source_base_domain", "count", "disposition", "dkim_alignment",
"spf_alignment", "policy_override_reasons",
"policy_override_comments", "envelope_from", "header_from",
"envelope_to", "dkim_domains", "dkim_selectors", "dkim_results",
"spf_domains", "spf_scopes", "spf_results"]
csv_file_object = StringIO(newline="\n")
writer = DictWriter(csv_file_object, fields)
writer.writeheader()
if type(reports) == OrderedDict:
reports = [reports]
for report in reports:
xml_schema = report["xml_schema"]
org_name = report["report_metadata"]["org_name"]
org_email = report["report_metadata"]["org_email"]
org_extra_contact = report["report_metadata"]["org_extra_contact_info"]
report_id = report["report_metadata"]["report_id"]
begin_date = report["report_metadata"]["begin_date"]
end_date = report["report_metadata"]["end_date"]
errors = "|".join(report["report_metadata"]["errors"])
domain = report["policy_published"]["domain"]
adkim = report["policy_published"]["adkim"]
aspf = report["policy_published"]["aspf"]
p = report["policy_published"]["p"]
sp = report["policy_published"]["sp"]
pct = report["policy_published"]["pct"]
fo = report["policy_published"]["fo"]
report_dict = dict(xml_schema=xml_schema, org_name=org_name,
org_email=org_email,
org_extra_contact_info=org_extra_contact,
report_id=report_id, begin_date=begin_date,
end_date=end_date, errors=errors, domain=domain,
adkim=adkim, aspf=aspf, p=p, sp=sp, pct=pct, fo=fo)
for record in report["records"]:
row = report_dict
row["source_ip_address"] = record["source"]["ip_address"]
row["source_country"] = record["source"]["country"]
row["source_reverse_dns"] = record["source"]["reverse_dns"]
row["source_base_domain"] = record["source"]["base_domain"]
row["count"] = record["count"]
row["disposition"] = record["policy_evaluated"]["disposition"]
row["spf_alignment"] = record["policy_evaluated"]["spf"]
row["dkim_alignment"] = record["policy_evaluated"]["dkim"]
policy_override_reasons = list(map(
lambda r: r["type"],
record["policy_evaluated"]
["policy_override_reasons"]))
policy_override_comments = list(map(
lambda r: r["comment"] or "none",
record["policy_evaluated"]
["policy_override_reasons"]))
row["policy_override_reasons"] = ",".join(
policy_override_reasons)
row["policy_override_comments"] = "|".join(
policy_override_comments)
row["envelope_from"] = record["identifiers"]["envelope_from"]
row["header_from"] = record["identifiers"]["header_from"]
envelope_to = record["identifiers"]["envelope_to"]
row["envelope_to"] = envelope_to
dkim_domains = []
dkim_selectors = []
dkim_results = []
for dkim_result in record["auth_results"]["dkim"]:
dkim_domains.append(dkim_result["domain"])
if "selector" in dkim_result:
dkim_selectors.append(dkim_result["selector"])
dkim_results.append(dkim_result["result"])
row["dkim_domains"] = ",".join(map(to_str, dkim_domains))
row["dkim_selectors"] = ",".join(map(to_str, dkim_selectors))
row["dkim_results"] = ",".join(map(to_str, dkim_results))
spf_domains = []
spf_scopes = []
spf_results = []
for spf_result in record["auth_results"]["spf"]:
spf_domains.append(spf_result["domain"])
spf_scopes.append(spf_result["scope"])
spf_results.append(spf_result["result"])
row["spf_domains"] = ",".join(map(to_str, spf_domains))
row["spf_scopes"] = ",".join(map(to_str, spf_scopes))
row["spf_results"] = ",".join(map(to_str, dkim_results))
writer.writerow(row)
csv_file_object.flush()
return csv_file_object.getvalue() | Converts one or more parsed aggregate reports to flat CSV format, including
headers
Args:
reports: A parsed aggregate report or list of parsed aggregate reports
Returns:
str: Parsed aggregate report data in flat CSV format, including headers | Below is the instruction that describes the task:
### Input:
Converts one or more parsed aggregate reports to flat CSV format, including
headers
Args:
reports: A parsed aggregate report or list of parsed aggregate reports
Returns:
str: Parsed aggregate report data in flat CSV format, including headers
### Response:
def parsed_aggregate_reports_to_csv(reports):
"""
Converts one or more parsed aggregate reports to flat CSV format, including
headers
Args:
reports: A parsed aggregate report or list of parsed aggregate reports
Returns:
str: Parsed aggregate report data in flat CSV format, including headers
"""
def to_str(obj):
return str(obj).lower()
fields = ["xml_schema", "org_name", "org_email",
"org_extra_contact_info", "report_id", "begin_date", "end_date",
"errors", "domain", "adkim", "aspf", "p", "sp", "pct", "fo",
"source_ip_address", "source_country", "source_reverse_dns",
"source_base_domain", "count", "disposition", "dkim_alignment",
"spf_alignment", "policy_override_reasons",
"policy_override_comments", "envelope_from", "header_from",
"envelope_to", "dkim_domains", "dkim_selectors", "dkim_results",
"spf_domains", "spf_scopes", "spf_results"]
csv_file_object = StringIO(newline="\n")
writer = DictWriter(csv_file_object, fields)
writer.writeheader()
if type(reports) == OrderedDict:
reports = [reports]
for report in reports:
xml_schema = report["xml_schema"]
org_name = report["report_metadata"]["org_name"]
org_email = report["report_metadata"]["org_email"]
org_extra_contact = report["report_metadata"]["org_extra_contact_info"]
report_id = report["report_metadata"]["report_id"]
begin_date = report["report_metadata"]["begin_date"]
end_date = report["report_metadata"]["end_date"]
errors = "|".join(report["report_metadata"]["errors"])
domain = report["policy_published"]["domain"]
adkim = report["policy_published"]["adkim"]
aspf = report["policy_published"]["aspf"]
p = report["policy_published"]["p"]
sp = report["policy_published"]["sp"]
pct = report["policy_published"]["pct"]
fo = report["policy_published"]["fo"]
report_dict = dict(xml_schema=xml_schema, org_name=org_name,
org_email=org_email,
org_extra_contact_info=org_extra_contact,
report_id=report_id, begin_date=begin_date,
end_date=end_date, errors=errors, domain=domain,
adkim=adkim, aspf=aspf, p=p, sp=sp, pct=pct, fo=fo)
for record in report["records"]:
row = report_dict
row["source_ip_address"] = record["source"]["ip_address"]
row["source_country"] = record["source"]["country"]
row["source_reverse_dns"] = record["source"]["reverse_dns"]
row["source_base_domain"] = record["source"]["base_domain"]
row["count"] = record["count"]
row["disposition"] = record["policy_evaluated"]["disposition"]
row["spf_alignment"] = record["policy_evaluated"]["spf"]
row["dkim_alignment"] = record["policy_evaluated"]["dkim"]
policy_override_reasons = list(map(
lambda r: r["type"],
record["policy_evaluated"]
["policy_override_reasons"]))
policy_override_comments = list(map(
lambda r: r["comment"] or "none",
record["policy_evaluated"]
["policy_override_reasons"]))
row["policy_override_reasons"] = ",".join(
policy_override_reasons)
row["policy_override_comments"] = "|".join(
policy_override_comments)
row["envelope_from"] = record["identifiers"]["envelope_from"]
row["header_from"] = record["identifiers"]["header_from"]
envelope_to = record["identifiers"]["envelope_to"]
row["envelope_to"] = envelope_to
dkim_domains = []
dkim_selectors = []
dkim_results = []
for dkim_result in record["auth_results"]["dkim"]:
dkim_domains.append(dkim_result["domain"])
if "selector" in dkim_result:
dkim_selectors.append(dkim_result["selector"])
dkim_results.append(dkim_result["result"])
row["dkim_domains"] = ",".join(map(to_str, dkim_domains))
row["dkim_selectors"] = ",".join(map(to_str, dkim_selectors))
row["dkim_results"] = ",".join(map(to_str, dkim_results))
spf_domains = []
spf_scopes = []
spf_results = []
for spf_result in record["auth_results"]["spf"]:
spf_domains.append(spf_result["domain"])
spf_scopes.append(spf_result["scope"])
spf_results.append(spf_result["result"])
row["spf_domains"] = ",".join(map(to_str, spf_domains))
row["spf_scopes"] = ",".join(map(to_str, spf_scopes))
row["spf_results"] = ",".join(map(to_str, dkim_results))
writer.writerow(row)
csv_file_object.flush()
return csv_file_object.getvalue() |
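The function assembles its output with csv.DictWriter writing into an in-memory io.StringIO buffer; a stripped-down sketch of that pattern, with made-up field names:

from csv import DictWriter
from io import StringIO

def rows_to_csv(rows, fields):
    # Write dict rows into an in-memory buffer and return the whole CSV, header included.
    buf = StringIO(newline="\n")
    writer = DictWriter(buf, fields)
    writer.writeheader()
    for row in rows:
        writer.writerow(row)
    return buf.getvalue()

print(rows_to_csv([{"domain": "example.com", "count": 3}], ["domain", "count"]))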
def get_vboxes(self):
"""
Get the maximum ID of the VBoxes
:return: Maximum VBox ID
:rtype: int
"""
vbox_list = []
vbox_max = None
for node in self.nodes:
if node['type'] == 'VirtualBoxVM':
vbox_list.append(node['vbox_id'])
if len(vbox_list) > 0:
vbox_max = max(vbox_list)
return vbox_max | Get the maximum ID of the VBoxes
:return: Maximum VBox ID
:rtype: int | Below is the instruction that describes the task:
### Input:
Get the maximum ID of the VBoxes
:return: Maximum VBox ID
:rtype: int
### Response:
def get_vboxes(self):
"""
Get the maximum ID of the VBoxes
:return: Maximum VBox ID
:rtype: int
"""
vbox_list = []
vbox_max = None
for node in self.nodes:
if node['type'] == 'VirtualBoxVM':
vbox_list.append(node['vbox_id'])
if len(vbox_list) > 0:
vbox_max = max(vbox_list)
return vbox_max |
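A design note: the temporary list and length check can be collapsed by giving max() a default value, as in this small sketch with invented node dicts:

nodes = [
    {"type": "VirtualBoxVM", "vbox_id": 2},
    {"type": "Router"},
    {"type": "VirtualBoxVM", "vbox_id": 5},
]

# max(..., default=None) mirrors "return None when no VirtualBoxVM nodes exist".
vbox_max = max((n["vbox_id"] for n in nodes if n["type"] == "VirtualBoxVM"), default=None)
print(vbox_max)  # 5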
def verify_light_chains(self, threshold=0.9):
'''
Clusters the light chains to identify potentially spurious (non-lineage)
pairings. Following clustering, all pairs in the largest light chain
cluster are assumed to be correctly paired. For each of those pairs,
the <verified> attribute is set to True. For pairs not in the largest
light chain cluster, the <verified> attribute is set to False.
Inputs (optional)
-----------------
threshold: CD-HIT clustering threshold. Default is 0.9.
'''
lseqs = [l.light for l in self.lights]
clusters = cluster(lseqs, threshold=threshold)
clusters.sort(key=lambda x: x.size, reverse=True)
verified_ids = clusters[0].ids
for p in self.lights:
p.verified = True if p.name in verified_ids else False | Clusters the light chains to identify potentially spurious (non-lineage)
pairings. Following clustering, all pairs in the largest light chain
cluster are assumed to be correctly paired. For each of those pairs,
the <verified> attribute is set to True. For pairs not in the largest
light chain cluster, the <verified> attribute is set to False.
Inputs (optional)
-----------------
threshold: CD-HIT clustering threshold. Default is 0.9. | Below is the instruction that describes the task:
### Input:
Clusters the light chains to identify potentially spurious (non-lineage)
pairings. Following clustering, all pairs in the largest light chain
cluster are assumed to be correctly paired. For each of those pairs,
the <verified> attribute is set to True. For pairs not in the largest
light chain cluster, the <verified> attribute is set to False.
Inputs (optional)
-----------------
threshold: CD-HIT clustering threshold. Default is 0.9.
### Response:
def verify_light_chains(self, threshold=0.9):
'''
Clusters the light chains to identify potentially spurious (non-lineage)
pairings. Following clustering, all pairs in the largest light chain
cluster are assumed to be correctly paired. For each of those pairs,
the <verified> attribute is set to True. For pairs not in the largest
light chain cluster, the <verified> attribute is set to False.
Inputs (optional)
-----------------
threshold: CD-HIT clustering threshold. Default is 0.9.
'''
lseqs = [l.light for l in self.lights]
clusters = cluster(lseqs, threshold=threshold)
clusters.sort(key=lambda x: x.size, reverse=True)
verified_ids = clusters[0].ids
for p in self.lights:
p.verified = True if p.name in verified_ids else False |
def AddDischargingBattery(self, device_name, model_name, percentage, seconds_to_empty):
'''Convenience method to add a discharging battery object
You have to specify a device name which must be a valid part of an object
path, e. g. "mock_ac", an arbitrary model name, the charge percentage, and
the seconds until the battery is empty.
Please note that this does not set any global properties such as
"on-battery".
Returns the new object path.
'''
path = '/org/freedesktop/UPower/devices/' + device_name
self.AddObject(path,
DEVICE_IFACE,
{
'PowerSupply': dbus.Boolean(True, variant_level=1),
'IsPresent': dbus.Boolean(True, variant_level=1),
'Model': dbus.String(model_name, variant_level=1),
'Percentage': dbus.Double(percentage, variant_level=1),
'TimeToEmpty': dbus.Int64(seconds_to_empty, variant_level=1),
'EnergyFull': dbus.Double(100.0, variant_level=1),
'Energy': dbus.Double(percentage, variant_level=1),
# UP_DEVICE_STATE_DISCHARGING
'State': dbus.UInt32(2, variant_level=1),
# UP_DEVICE_KIND_BATTERY
'Type': dbus.UInt32(2, variant_level=1),
},
[])
self.EmitSignal(MAIN_IFACE, 'DeviceAdded', self.device_sig_type, [path])
return path | Convenience method to add a discharging battery object
You have to specify a device name which must be a valid part of an object
path, e. g. "mock_ac", an arbitrary model name, the charge percentage, and
the seconds until the battery is empty.
Please note that this does not set any global properties such as
"on-battery".
Returns the new object path. | Below is the instruction that describes the task:
### Input:
Convenience method to add a discharging battery object
You have to specify a device name which must be a valid part of an object
path, e. g. "mock_ac", an arbitrary model name, the charge percentage, and
the seconds until the battery is empty.
Please note that this does not set any global properties such as
"on-battery".
Returns the new object path.
### Response:
def AddDischargingBattery(self, device_name, model_name, percentage, seconds_to_empty):
'''Convenience method to add a discharging battery object
You have to specify a device name which must be a valid part of an object
path, e. g. "mock_ac", an arbitrary model name, the charge percentage, and
the seconds until the battery is empty.
Please note that this does not set any global properties such as
"on-battery".
Returns the new object path.
'''
path = '/org/freedesktop/UPower/devices/' + device_name
self.AddObject(path,
DEVICE_IFACE,
{
'PowerSupply': dbus.Boolean(True, variant_level=1),
'IsPresent': dbus.Boolean(True, variant_level=1),
'Model': dbus.String(model_name, variant_level=1),
'Percentage': dbus.Double(percentage, variant_level=1),
'TimeToEmpty': dbus.Int64(seconds_to_empty, variant_level=1),
'EnergyFull': dbus.Double(100.0, variant_level=1),
'Energy': dbus.Double(percentage, variant_level=1),
# UP_DEVICE_STATE_DISCHARGING
'State': dbus.UInt32(2, variant_level=1),
# UP_DEVICE_KIND_BATTERY
'Type': dbus.UInt32(2, variant_level=1),
},
[])
self.EmitSignal(MAIN_IFACE, 'DeviceAdded', self.device_sig_type, [path])
return path |
def appendBPoint(self, type=None, anchor=None, bcpIn=None, bcpOut=None, bPoint=None):
"""
Append a bPoint to the contour.
"""
if bPoint is not None:
if type is None:
type = bPoint.type
if anchor is None:
anchor = bPoint.anchor
if bcpIn is None:
bcpIn = bPoint.bcpIn
if bcpOut is None:
bcpOut = bPoint.bcpOut
type = normalizers.normalizeBPointType(type)
anchor = normalizers.normalizeCoordinateTuple(anchor)
if bcpIn is None:
bcpIn = (0, 0)
bcpIn = normalizers.normalizeCoordinateTuple(bcpIn)
if bcpOut is None:
bcpOut = (0, 0)
bcpOut = normalizers.normalizeCoordinateTuple(bcpOut)
self._appendBPoint(type, anchor, bcpIn=bcpIn, bcpOut=bcpOut) | Append a bPoint to the contour. | Below is the instruction that describes the task:
### Input:
Append a bPoint to the contour.
### Response:
def appendBPoint(self, type=None, anchor=None, bcpIn=None, bcpOut=None, bPoint=None):
"""
Append a bPoint to the contour.
"""
if bPoint is not None:
if type is None:
type = bPoint.type
if anchor is None:
anchor = bPoint.anchor
if bcpIn is None:
bcpIn = bPoint.bcpIn
if bcpOut is None:
bcpOut = bPoint.bcpOut
type = normalizers.normalizeBPointType(type)
anchor = normalizers.normalizeCoordinateTuple(anchor)
if bcpIn is None:
bcpIn = (0, 0)
bcpIn = normalizers.normalizeCoordinateTuple(bcpIn)
if bcpOut is None:
bcpOut = (0, 0)
bcpOut = normalizers.normalizeCoordinateTuple(bcpOut)
self._appendBPoint(type, anchor, bcpIn=bcpIn, bcpOut=bcpOut) |
def dump2json(self, obj, filepath, override=False, **kwargs):
"""
Dump a dictionary into a JSON dictionary.
Uses the json.dump() function.
Parameters
----------
obj : :class:`dict`
A dictionary to be dumped as a JSON file.
filepath : :class:`str`
The filepath for the dumped file.
override : :class:`bool`
If True, any file in the filepath will be overridden. (default=False)
"""
# We make sure that the object passed by the user is a dictionary.
if isinstance(obj, dict):
pass
else:
raise _NotADictionary(
"This function only accepts dictionaries as input")
# We check if the filepath has a json extension, if not we add it.
if str(filepath[-4:]) == 'json':
pass
else:
filepath = ".".join((str(filepath), "json"))
# First we check if the file already exists. If yes and the override
# keyword is False (default), we will raise an exception. Otherwise
# the file will be overwritten.
if override is False:
if os.path.isfile(filepath):
raise _FileAlreadyExists(
"The file {0} already exists. Use a different filepath, "
"or set the 'override' kwarg to True.".format(filepath))
# We dump the object to the json file. Additional kwargs can be passed.
with open(filepath, 'w+') as json_file:
json.dump(obj, json_file, **kwargs) | Dump a dictionary into a JSON dictionary.
Uses the json.dump() function.
Parameters
----------
obj : :class:`dict`
A dictionary to be dumped as a JSON file.
filepath : :class:`str`
The filepath for the dumped file.
override : :class:`bool`
If True, any file in the filepath will be overridden. (default=False) | Below is the instruction that describes the task:
### Input:
Dump a dictionary into a JSON dictionary.
Uses the json.dump() function.
Parameters
----------
obj : :class:`dict`
A dictionary to be dumped as a JSON file.
filepath : :class:`str`
The filepath for the dumped file.
override : :class:`bool`
If True, any file in the filepath will be overridden. (default=False)
### Response:
def dump2json(self, obj, filepath, override=False, **kwargs):
"""
Dump a dictionary into a JSON dictionary.
Uses the json.dump() function.
Parameters
----------
obj : :class:`dict`
A dictionary to be dumped as a JSON file.
filepath : :class:`str`
The filepath for the dumped file.
override : :class:`bool`
If True, any file in the filepath will be overridden. (default=False)
"""
# We make sure that the object passed by the user is a dictionary.
if isinstance(obj, dict):
pass
else:
raise _NotADictionary(
"This function only accepts dictionaries as input")
# We check if the filepath has a json extension, if not we add it.
if str(filepath[-4:]) == 'json':
pass
else:
filepath = ".".join((str(filepath), "json"))
# First we check if the file already exists. If yes and the override
# keyword is False (default), we will raise an exception. Otherwise
# the file will be overwritten.
if override is False:
if os.path.isfile(filepath):
raise _FileAlreadyExists(
"The file {0} already exists. Use a different filepath, "
"or set the 'override' kwarg to True.".format(filepath))
# We dump the object to the json file. Additional kwargs can be passed.
with open(filepath, 'w+') as json_file:
json.dump(obj, json_file, **kwargs) |
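A short standalone sketch of the same dump logic, with the project-specific exceptions replaced by the built-in ValueError and FileExistsError (an assumption made only so the snippet runs on its own):

import json
import os

def dump_dict_to_json(obj, filepath, override=False, **kwargs):
    if not isinstance(obj, dict):
        raise ValueError("This function only accepts dictionaries as input")
    # Add the extension when it is missing, then refuse to clobber unless asked to.
    if not filepath.endswith(".json"):
        filepath = filepath + ".json"
    if not override and os.path.isfile(filepath):
        raise FileExistsError("The file {0} already exists.".format(filepath))
    with open(filepath, "w+") as json_file:
        json.dump(obj, json_file, **kwargs)

dump_dict_to_json({"a": 1}, "example_output", override=True, indent=2)  # writes example_output.json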
def create_pg_notify_event(notif):
"""A factory for creating a Postgres Notification Event
(an object inheriting from `cnxpublishing.events.PGNotifyEvent`)
given `notif`, a `psycopg2.extensions.Notify` object.
"""
# TODO Lookup registered events via getAllUtilitiesRegisteredFor
# for class mapping.
if notif.channel not in _CHANNEL_MAPPER:
cls = _CHANNEL_MAPPER[None]
else:
cls = _CHANNEL_MAPPER[notif.channel]
return cls(notif) | A factory for creating a Postgres Notification Event
(an object inheriting from `cnxpublishing.events.PGNotifyEvent`)
given `notif`, a `psycopg2.extensions.Notify` object. | Below is the instruction that describes the task:
### Input:
A factory for creating a Postgres Notification Event
(an object inheriting from `cnxpublishing.events.PGNotifyEvent`)
given `notif`, a `psycopg2.extensions.Notify` object.
### Response:
def create_pg_notify_event(notif):
"""A factory for creating a Postgres Notification Event
(an object inheriting from `cnxpublishing.events.PGNotifyEvent`)
given `notif`, a `psycopg2.extensions.Notify` object.
"""
# TODO Lookup registered events via getAllUtilitiesRegisteredFor
# for class mapping.
if notif.channel not in _CHANNEL_MAPPER:
cls = _CHANNEL_MAPPER[None]
else:
cls = _CHANNEL_MAPPER[notif.channel]
return cls(notif) |
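The factory is a plain channel-to-class dispatch table with the None key acting as the fallback; a self-contained sketch of that mapping pattern, using placeholder event classes rather than the cnxpublishing ones:

class DefaultEvent:
    def __init__(self, notif):
        self.notif = notif

class ChannelAEvent(DefaultEvent):
    pass

_CHANNEL_MAPPER = {None: DefaultEvent, "channel_a": ChannelAEvent}

def create_event(channel, notif):
    # Fall back to the None entry for channels without a registered class.
    cls = _CHANNEL_MAPPER.get(channel, _CHANNEL_MAPPER[None])
    return cls(notif)

print(type(create_event("channel_a", object())).__name__)  # ChannelAEvent
print(type(create_event("unknown", object())).__name__)    # DefaultEvent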
def int_attribute(element, attribute, default=0):
"""
Returns the int value of an attribute, or a default if it's not defined
:param element: The XML Element object
:type element: etree._Element
:param attribute: The name of the attribute to evaluate
:type attribute: basestring
:param default: The default value to return if the attribute is not defined
:type default: int
:rtype: int
"""
attribute_value = element.get(attribute)
if attribute_value:
try:
return int(attribute_value)
except (TypeError, ValueError):
return default
return default | Returns the int value of an attribute, or a default if it's not defined
:param element: The XML Element object
:type element: etree._Element
:param attribute: The name of the attribute to evaluate
:type attribute: basestring
:param default: The default value to return if the attribute is not defined
:type default: int
:rtype: int | Below is the instruction that describes the task:
### Input:
Returns the int value of an attribute, or a default if it's not defined
:param element: The XML Element object
:type element: etree._Element
:param attribute: The name of the attribute to evaluate
:type attribute: basestring
:param default: The default value to return if the attribute is not defined
:type default: int
:rtype: int
### Response:
def int_attribute(element, attribute, default=0):
"""
Returns the int value of an attribute, or a default if it's not defined
:param element: The XML Element object
:type element: etree._Element
:param attribute: The name of the attribute to evaluate
:type attribute: basestring
:param default: The default value to return if the attribute is not defined
:type default: int
:rtype: int
"""
attribute_value = element.get(attribute)
if attribute_value:
try:
return int(attribute_value)
except (TypeError, ValueError):
return default
return default |
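A usage sketch of the same attribute parsing, using the standard-library xml.etree.ElementTree instead of lxml (an assumption; the helper below simply mirrors int_attribute so the snippet runs on its own):

import xml.etree.ElementTree as ET

def int_attribute(element, attribute, default=0):
    # Return the attribute as an int, falling back to the default when absent or unparsable.
    value = element.get(attribute)
    if value:
        try:
            return int(value)
        except (TypeError, ValueError):
            return default
    return default

element = ET.fromstring('<item count="12" weight="heavy"/>')
print(int_attribute(element, "count"))       # 12
print(int_attribute(element, "weight", -1))  # -1, not an int
print(int_attribute(element, "missing", 5))  # 5, attribute absent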
def extract_time(match):
"""extract time from a time_re match."""
hour = int(match.group('hour'))
minute = int(match.group('minute'))
return dt.time(hour, minute) | extract time from a time_re match. | Below is the instruction that describes the task:
### Input:
extract time from a time_re match.
### Response:
def extract_time(match):
"""extract time from a time_re match."""
hour = int(match.group('hour'))
minute = int(match.group('minute'))
return dt.time(hour, minute) |
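The function expects a match object from a time_re with named hour and minute groups; the exact pattern is not shown in the source, so the one below is an assumed stand-in for demonstration:

import datetime as dt
import re

# Hypothetical stand-in for the module's time_re: named groups 'hour' and 'minute'.
time_re = re.compile(r"(?P<hour>\d{1,2}):(?P<minute>\d{2})")

def extract_time(match):
    hour = int(match.group('hour'))
    minute = int(match.group('minute'))
    return dt.time(hour, minute)

match = time_re.search("meeting at 14:30 tomorrow")
print(extract_time(match))  # 14:30:00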
def RecordEvent(self, metric_name, value, fields=None):
"""See base class."""
self._event_metrics[metric_name].Record(value, fields) | See base class. | Below is the instruction that describes the task:
### Input:
See base class.
### Response:
def RecordEvent(self, metric_name, value, fields=None):
"""See base class."""
self._event_metrics[metric_name].Record(value, fields) |
async def put(self, path=''):
"""Publish a notebook on a given path.
The payload directly matches the contents API for PUT.
"""
self.log.info("Attempt publishing to %s", path)
if path == '' or path == '/':
raise web.HTTPError(400, "Must provide a path for publishing")
model = self.get_json_body()
if model:
await self._publish(model, path.lstrip('/'))
else:
raise web.HTTPError(400, "Cannot publish an empty model") | Publish a notebook on a given path.
The payload directly matches the contents API for PUT. | Below is the instruction that describes the task:
### Input:
Publish a notebook on a given path.
The payload directly matches the contents API for PUT.
### Response:
async def put(self, path=''):
"""Publish a notebook on a given path.
The payload directly matches the contents API for PUT.
"""
self.log.info("Attempt publishing to %s", path)
if path == '' or path == '/':
raise web.HTTPError(400, "Must provide a path for publishing")
model = self.get_json_body()
if model:
await self._publish(model, path.lstrip('/'))
else:
raise web.HTTPError(400, "Cannot publish an empty model") |
def find_any_reports(self, usage_page = 0, usage_id = 0):
"""Find any report type referencing HID usage control/data item.
Results are returned in a dictionary mapping report_type to usage
lists.
"""
items = [
(HidP_Input, self.find_input_reports(usage_page, usage_id)),
(HidP_Output, self.find_output_reports(usage_page, usage_id)),
(HidP_Feature, self.find_feature_reports(usage_page, usage_id)),
]
return dict([(t, r) for t, r in items if r]) | Find any report type referencing HID usage control/data item.
Results are returned in a dictionary mapping report_type to usage
lists. | Below is the instruction that describes the task:
### Input:
Find any report type referencing HID usage control/data item.
Results are returned in a dictionary mapping report_type to usage
lists.
### Response:
def find_any_reports(self, usage_page = 0, usage_id = 0):
"""Find any report type referencing HID usage control/data item.
Results are returned in a dictionary mapping report_type to usage
lists.
"""
items = [
(HidP_Input, self.find_input_reports(usage_page, usage_id)),
(HidP_Output, self.find_output_reports(usage_page, usage_id)),
(HidP_Feature, self.find_feature_reports(usage_page, usage_id)),
]
return dict([(t, r) for t, r in items if r]) |
def project_usls_on_dictionary(usls, allowed_terms=None):
"""`usls` is an iterable of usl.
return a mapping term -> usl list
"""
cells_to_usls = defaultdict(set)
tables = set()
for u in usls:
for t in u.objects(Term):
for c in t.singular_sequences:
# This is the first time we meet the cell c
if not cells_to_usls[c]:
tables.update(c.relations.contained)
cells_to_usls[c].add(u)
if allowed_terms:
allowed_terms = set(allowed_terms)
tables = tables & allowed_terms
cells_to_usls = {c: l for c, l in cells_to_usls.items() if c in allowed_terms}
tables_to_usls = {
table: list(set(u for c in table.singular_sequences for u in cells_to_usls[c]))
for table in tables if not isinstance(table, TableSet)
}
return tables_to_usls | `usls` is an iterable of usl.
return a mapping term -> usl list | Below is the instruction that describes the task:
### Input:
`usls` is an iterable of usl.
return a mapping term -> usl list
### Response:
def project_usls_on_dictionary(usls, allowed_terms=None):
"""`usls` is an iterable of usl.
return a mapping term -> usl list
"""
cells_to_usls = defaultdict(set)
tables = set()
for u in usls:
for t in u.objects(Term):
for c in t.singular_sequences:
# This is the first time we meet the cell c
if not cells_to_usls[c]:
tables.update(c.relations.contained)
cells_to_usls[c].add(u)
if allowed_terms:
allowed_terms = set(allowed_terms)
tables = tables & allowed_terms
cells_to_usls = {c: l for c, l in cells_to_usls.items() if c in allowed_terms}
tables_to_usls = {
table: list(set(u for c in table.singular_sequences for u in cells_to_usls[c]))
for table in tables if not isinstance(table, TableSet)
}
return tables_to_usls |
def count(self, flag_message, padding=None, force=False):
""" Log Level: :attr:COUNT
@flag_message: time-like #float
@padding: #str 'top', 'bottom' or 'all', adds a new line to the
specified area with :func:padd
@force: #bool whether or not to force the message to log in spite
of the assigned log level
..
from vital.debug import Logg
logg = Logg(loglevel="v")
logg("Total apps").count(3)
# Total apps (3)
logg().count([0, 1, 2, 3])
# (4)
..
"""
if self.should_log(self.COUNT) or force:
flag_message = flag_message \
if isinstance(flag_message, (int, float)) else \
str(len(flag_message))
self._print_message(
flag_message=flag_message, padding=padding, reverse=True,
color=colors.timing_color) | Log Level: :attr:COUNT
@flag_message: time-like #float
@padding: #str 'top', 'bottom' or 'all', adds a new line to the
specified area with :func:padd
@force: #bool whether or not to force the message to log in spite
of the assigned log level
..
from vital.debug import Logg
logg = Logg(loglevel="v")
logg("Total apps").count(3)
# Total apps (3)
logg().count([0, 1, 2, 3])
# (4)
.. | Below is the instruction that describes the task:
### Input:
Log Level: :attr:COUNT
@flag_message: time-like #float
@padding: #str 'top', 'bottom' or 'all', adds a new line to the
specified area with :func:padd
@force: #bool whether or not to force the message to log in spite
of the assigned log level
..
from vital.debug import Logg
logg = Logg(loglevel="v")
logg("Total apps").count(3)
# Total apps (3)
logg().count([0, 1, 2, 3])
# (4)
..
### Response:
def count(self, flag_message, padding=None, force=False):
""" Log Level: :attr:COUNT
@flag_message: time-like #float
@padding: #str 'top', 'bottom' or 'all', adds a new line to the
specified area with :func:padd
@force: #bool whether or not to force the message to log in spite
of the assigned log level
..
from vital.debug import Logg
logg = Logg(loglevel="v")
logg("Total apps").count(3)
# Total apps (3)
logg().count([0, 1, 2, 3])
# (4)
..
"""
if self.should_log(self.COUNT) or force:
flag_message = flag_message \
if isinstance(flag_message, (int, float)) else \
str(len(flag_message))
self._print_message(
flag_message=flag_message, padding=padding, reverse=True,
color=colors.timing_color) |
def add_texts(self, reference_id, texts):
"""\
Adds the words from the provided iterable `texts` to the corpus.
The strings will be tokenized.
`reference_id`
The reference identifier of the cable.
`texts`
An iterable of strings.
"""
self.add_words(reference_id, chain(*(self._tokenize(t) for t in texts))) | \
Adds the words from the provided iterable `texts` to the corpus.
The strings will be tokenized.
`reference_id`
The reference identifier of the cable.
`texts`
An iterable of strings. | Below is the instruction that describes the task:
### Input:
\
Adds the words from the provided iterable `texts` to the corpus.
The strings will be tokenized.
`reference_id`
The reference identifier of the cable.
`texts`
An iterable of strings.
### Response:
def add_texts(self, reference_id, texts):
"""\
Adds the words from the provided iterable `texts` to the corpus.
The strings will be tokenized.
`reference_id`
The reference identifier of the cable.
`texts`
An iterable of strings.
"""
self.add_words(reference_id, chain(*(self._tokenize(t) for t in texts))) |
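The chain(*(...)) call flattens the per-text token streams into a single iterable before it reaches add_words; a minimal standalone sketch of that flattening with a naive whitespace tokenizer (both the tokenizer and the sample texts are illustrative):

from itertools import chain

def tokenize(text):
    return text.lower().split()

texts = ["First cable text", "second TEXT"]
# One flat stream of tokens across all texts.
words = chain(*(tokenize(t) for t in texts))
print(list(words))  # ['first', 'cable', 'text', 'second', 'text']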
def _eq(self, T, P):
"""Procedure for calculate the composition in saturation state
Parameters
----------
T : float
Temperature [K]
P : float
Pressure [MPa]
Returns
-------
Asat : float
Saturation mass fraction of dry air in humid air [kg/kg]
"""
if T <= 273.16:
ice = _Ice(T, P)
gw = ice["g"]
else:
water = IAPWS95(T=T, P=P)
gw = water.g
def f(parr):
rho, a = parr
if a > 1:
a = 1
fa = self._fav(T, rho, a)
muw = fa["fir"]+rho*fa["fird"]-a*fa["fira"]
return gw-muw, rho**2*fa["fird"]/1000-P
rinput = fsolve(f, [1, 0.95], full_output=True)
Asat = rinput[0][1]
return Asat | Procedure to calculate the composition in saturation state
Parameters
----------
T : float
Temperature [K]
P : float
Pressure [MPa]
Returns
-------
Asat : float
Saturation mass fraction of dry air in humid air [kg/kg] | Below is the instruction that describes the task:
### Input:
Procedure to calculate the composition in saturation state
Parameters
----------
T : float
Temperature [K]
P : float
Pressure [MPa]
Returns
-------
Asat : float
Saturation mass fraction of dry air in humid air [kg/kg]
### Response:
def _eq(self, T, P):
"""Procedure for calculate the composition in saturation state
Parameters
----------
T : float
Temperature [K]
P : float
Pressure [MPa]
Returns
-------
Asat : float
Saturation mass fraction of dry air in humid air [kg/kg]
"""
if T <= 273.16:
ice = _Ice(T, P)
gw = ice["g"]
else:
water = IAPWS95(T=T, P=P)
gw = water.g
def f(parr):
rho, a = parr
if a > 1:
a = 1
fa = self._fav(T, rho, a)
muw = fa["fir"]+rho*fa["fird"]-a*fa["fira"]
return gw-muw, rho**2*fa["fird"]/1000-P
rinput = fsolve(f, [1, 0.95], full_output=True)
Asat = rinput[0][1]
return Asat |
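The numerical core is scipy.optimize.fsolve applied to a two-unknown residual function; the sketch below shows that calling pattern on a simple algebraic system rather than the humid-air equations themselves:

from scipy.optimize import fsolve

def residuals(parr):
    x, y = parr
    # Both expressions are zero at a solution: x*y = 6 and x + y = 5.
    return x * y - 6.0, x + y - 5.0

rinput = fsolve(residuals, [1.0, 0.95], full_output=True)
solution = rinput[0]
print(solution)  # one of the two roots, (2, 3) or (3, 2)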
def get(self, sid):
"""
Constructs a CredentialContext
:param sid: The unique id that identifies the resource to fetch.
:returns: twilio.rest.api.v2010.account.sip.credential_list.credential.CredentialContext
:rtype: twilio.rest.api.v2010.account.sip.credential_list.credential.CredentialContext
"""
return CredentialContext(
self._version,
account_sid=self._solution['account_sid'],
credential_list_sid=self._solution['credential_list_sid'],
sid=sid,
) | Constructs a CredentialContext
:param sid: The unique id that identifies the resource to fetch.
:returns: twilio.rest.api.v2010.account.sip.credential_list.credential.CredentialContext
:rtype: twilio.rest.api.v2010.account.sip.credential_list.credential.CredentialContext | Below is the instruction that describes the task:
### Input:
Constructs a CredentialContext
:param sid: The unique id that identifies the resource to fetch.
:returns: twilio.rest.api.v2010.account.sip.credential_list.credential.CredentialContext
:rtype: twilio.rest.api.v2010.account.sip.credential_list.credential.CredentialContext
### Response:
def get(self, sid):
"""
Constructs a CredentialContext
:param sid: The unique id that identifies the resource to fetch.
:returns: twilio.rest.api.v2010.account.sip.credential_list.credential.CredentialContext
:rtype: twilio.rest.api.v2010.account.sip.credential_list.credential.CredentialContext
"""
return CredentialContext(
self._version,
account_sid=self._solution['account_sid'],
credential_list_sid=self._solution['credential_list_sid'],
sid=sid,
) |
def __last_commit(self):
"""
Retrieve the most recent commit message (with ``svn info``)
Returns:
tuple: (datestr, (revno, user, None, desc))
$ svn info
Path: .
URL: http://python-dlp.googlecode.com/svn/trunk/layercake-python
Repository Root: http://python-dlp.googlecode.com/svn
Repository UUID: d0ad5f6e-b329-0410-b51c-492c9c4f233d
Revision: 378
Node Kind: directory
Schedule: normal
Last Changed Author: chimezie
Last Changed Rev: 378
Last Changed Date: 2011-05-01 01:31:38 -0500 (Sun, 01 May 2011)
"""
cmd = ['svn', 'info']
op = self.sh(cmd, shell=False)
if not op:
return None
author, rev, datestr = op.split('\n')[7:10]
author = author.split(': ', 1)[1].strip()
rev = rev.split(': ', 1)[1].strip()
datestr = datestr.split(': ', 1)[1].split('(', 1)[0].strip()
return datestr, (rev, author, None, None) | Retrieve the most recent commit message (with ``svn info``)
Returns:
tuple: (datestr, (revno, user, None, desc))
$ svn info
Path: .
URL: http://python-dlp.googlecode.com/svn/trunk/layercake-python
Repository Root: http://python-dlp.googlecode.com/svn
Repository UUID: d0ad5f6e-b329-0410-b51c-492c9c4f233d
Revision: 378
Node Kind: directory
Schedule: normal
Last Changed Author: chimezie
Last Changed Rev: 378
Last Changed Date: 2011-05-01 01:31:38 -0500 (Sun, 01 May 2011) | Below is the instruction that describes the task:
### Input:
Retrieve the most recent commit message (with ``svn info``)
Returns:
tuple: (datestr, (revno, user, None, desc))
$ svn info
Path: .
URL: http://python-dlp.googlecode.com/svn/trunk/layercake-python
Repository Root: http://python-dlp.googlecode.com/svn
Repository UUID: d0ad5f6e-b329-0410-b51c-492c9c4f233d
Revision: 378
Node Kind: directory
Schedule: normal
Last Changed Author: chimezie
Last Changed Rev: 378
Last Changed Date: 2011-05-01 01:31:38 -0500 (Sun, 01 May 2011)
### Response:
def __last_commit(self):
"""
Retrieve the most recent commit message (with ``svn info``)
Returns:
tuple: (datestr, (revno, user, None, desc))
$ svn info
Path: .
URL: http://python-dlp.googlecode.com/svn/trunk/layercake-python
Repository Root: http://python-dlp.googlecode.com/svn
Repository UUID: d0ad5f6e-b329-0410-b51c-492c9c4f233d
Revision: 378
Node Kind: directory
Schedule: normal
Last Changed Author: chimezie
Last Changed Rev: 378
Last Changed Date: 2011-05-01 01:31:38 -0500 (Sun, 01 May 2011)
"""
cmd = ['svn', 'info']
op = self.sh(cmd, shell=False)
if not op:
return None
author, rev, datestr = op.split('\n')[7:10]
author = author.split(': ', 1)[1].strip()
rev = rev.split(': ', 1)[1].strip()
datestr = datestr.split(': ', 1)[1].split('(', 1)[0].strip()
return datestr, (rev, author, None, None) |
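Slicing op.split('\n')[7:10] assumes a fixed svn info layout; a more defensive, self-contained sketch parses the "Key: value" lines into a dict first (the sample output is hard-coded so the snippet runs on its own):

sample = """Path: .
URL: http://python-dlp.googlecode.com/svn/trunk/layercake-python
Revision: 378
Last Changed Author: chimezie
Last Changed Rev: 378
Last Changed Date: 2011-05-01 01:31:38 -0500 (Sun, 01 May 2011)"""

# Split each "Key: value" line once, then pull out the fields by name.
info = dict(line.split(": ", 1) for line in sample.splitlines() if ": " in line)
author = info["Last Changed Author"]
rev = info["Last Changed Rev"]
datestr = info["Last Changed Date"].split("(", 1)[0].strip()
print(datestr, (rev, author, None, None))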
def translate_output_properties(res: 'Resource', output: Any) -> Any:
"""
Recursively rewrite keys of objects returned by the engine to conform with a naming
convention specified by the resource's implementation of `translate_output_property`.
If output is a `dict`, every key is translated using `translate_output_property` while every value is transformed
by recursing.
If output is a `list`, every value is recursively transformed.
If output is a primitive (i.e. not a dict or list), the value is returned without modification.
"""
if isinstance(output, dict):
return {res.translate_output_property(k): translate_output_properties(res, v) for k, v in output.items()}
if isinstance(output, list):
return [translate_output_properties(res, v) for v in output]
return output | Recursively rewrite keys of objects returned by the engine to conform with a naming
convention specified by the resource's implementation of `translate_output_property`.
If output is a `dict`, every key is translated using `translate_output_property` while every value is transformed
by recursing.
If output is a `list`, every value is recursively transformed.
If output is a primitive (i.e. not a dict or list), the value is returned without modification. | Below is the instruction that describes the task:
### Input:
Recursively rewrite keys of objects returned by the engine to conform with a naming
convention specified by the resource's implementation of `translate_output_property`.
If output is a `dict`, every key is translated using `translate_output_property` while every value is transformed
by recursing.
If output is a `list`, every value is recursively transformed.
If output is a primitive (i.e. not a dict or list), the value is returned without modification.
### Response:
def translate_output_properties(res: 'Resource', output: Any) -> Any:
"""
Recursively rewrite keys of objects returned by the engine to conform with a naming
convention specified by the resource's implementation of `translate_output_property`.
If output is a `dict`, every key is translated using `translate_output_property` while every value is transformed
by recursing.
If output is a `list`, every value is recursively transformed.
If output is a primitive (i.e. not a dict or list), the value is returned without modification.
"""
if isinstance(output, dict):
return {res.translate_output_property(k): translate_output_properties(res, v) for k, v in output.items()}
if isinstance(output, list):
return [translate_output_properties(res, v) for v in output]
return output |
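A self-contained sketch of the same recursive key translation, using a dummy resource whose translate_output_property converts camelCase to snake_case (the real mapping lives on the Pulumi resource and is not reproduced here):

import re
from typing import Any

class DummyResource:
    def translate_output_property(self, prop: str) -> str:
        # camelCase -> snake_case, a stand-in for the resource's own mapping.
        return re.sub(r"(?<!^)(?=[A-Z])", "_", prop).lower()

def translate_outputs(res, output: Any) -> Any:
    if isinstance(output, dict):
        return {res.translate_output_property(k): translate_outputs(res, v)
                for k, v in output.items()}
    if isinstance(output, list):
        return [translate_outputs(res, v) for v in output]
    return output

print(translate_outputs(DummyResource(), {"bucketName": "b", "tags": [{"keyName": "k"}]}))
# {'bucket_name': 'b', 'tags': [{'key_name': 'k'}]}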
def retrieve(self, *args, **kwargs):
"""Retrieve the permsission function for the provided things.
"""
lookup, key = self._lookup(*args, **kwargs)
return lookup[key] | Retrieve the permission function for the provided things. | Below is the instruction that describes the task:
### Input:
Retrieve the permission function for the provided things.
### Response:
def retrieve(self, *args, **kwargs):
"""Retrieve the permsission function for the provided things.
"""
lookup, key = self._lookup(*args, **kwargs)
return lookup[key] |
def elog(exc, func, args=None, kwargs=None, str=str, pretty=True, name=''):
'''For logging exception-raising function invocations during randomized unit tests.
'''
from .str import safe_str
args = args if args else ()
kwargs = kwargs if kwargs else {}
name = '{}.{}'.format(get_mod(func), name) if name else full_funcname(func)
if pretty:
invocation = ', '.join([safe_str(arg) for arg in args])
if kwargs:
invocation += ', '
invocation += ', '.join(['{}={}'.format(key, safe_str(value))
for key, value in sorted(kwargs.items())])
else:
invocation = 'args={}, kwargs={}'.format(safe_str(args), safe_str(kwargs))
msg = '***{}***: "{}" --- {}({})'.format(get_typename(exc),
message(exc),
name,
invocation)
elogger.error(msg) | For logging exception-raising function invocations during randomized unit tests. | Below is the instruction that describes the task:
### Input:
For logging exception-raising function invocations during randomized unit tests.
### Response:
def elog(exc, func, args=None, kwargs=None, str=str, pretty=True, name=''):
'''For logging exception-raising function invocations during randomized unit tests.
'''
from .str import safe_str
args = args if args else ()
kwargs = kwargs if kwargs else {}
name = '{}.{}'.format(get_mod(func), name) if name else full_funcname(func)
if pretty:
invocation = ', '.join([safe_str(arg) for arg in args])
if kwargs:
invocation += ', '
invocation += ', '.join(['{}={}'.format(key, safe_str(value))
for key, value in sorted(kwargs.items())])
else:
invocation = 'args={}, kwargs={}'.format(safe_str(args), safe_str(kwargs))
msg = '***{}***: "{}" --- {}({})'.format(get_typename(exc),
message(exc),
name,
invocation)
elogger.error(msg) |
def process_api_config_response(self, config_json):
"""Parses a JSON API config and registers methods for dispatch.
Side effects:
Parses method name, etc. for all methods and updates the indexing
data structures with the information.
Args:
config_json: A dict, the JSON body of the getApiConfigs response.
"""
with self._config_lock:
self._add_discovery_config()
for config in config_json.get('items', []):
lookup_key = config.get('name', ''), config.get('version', '')
self._configs[lookup_key] = config
for config in self._configs.itervalues():
name = config.get('name', '')
api_version = config.get('api_version', '')
path_version = config.get('path_version', '')
sorted_methods = self._get_sorted_methods(config.get('methods', {}))
for method_name, method in sorted_methods:
self._save_rest_method(method_name, name, path_version, method) | Parses a JSON API config and registers methods for dispatch.
Side effects:
Parses method name, etc. for all methods and updates the indexing
data structures with the information.
Args:
config_json: A dict, the JSON body of the getApiConfigs response. | Below is the the instruction that describes the task:
### Input:
Parses a JSON API config and registers methods for dispatch.
Side effects:
Parses method name, etc. for all methods and updates the indexing
data structures with the information.
Args:
config_json: A dict, the JSON body of the getApiConfigs response.
### Response:
def process_api_config_response(self, config_json):
"""Parses a JSON API config and registers methods for dispatch.
Side effects:
Parses method name, etc. for all methods and updates the indexing
data structures with the information.
Args:
config_json: A dict, the JSON body of the getApiConfigs response.
"""
with self._config_lock:
self._add_discovery_config()
for config in config_json.get('items', []):
lookup_key = config.get('name', ''), config.get('version', '')
self._configs[lookup_key] = config
for config in self._configs.itervalues():
name = config.get('name', '')
api_version = config.get('api_version', '')
path_version = config.get('path_version', '')
sorted_methods = self._get_sorted_methods(config.get('methods', {}))
for method_name, method in sorted_methods:
self._save_rest_method(method_name, name, path_version, method) |
def load_class_by_name(name: str):
"""Given a dotted path, returns the class"""
mod_path, _, cls_name = name.rpartition('.')
mod = importlib.import_module(mod_path)
cls = getattr(mod, cls_name)
return cls | Given a dotted path, returns the class | Below is the the instruction that describes the task:
### Input:
Given a dotted path, returns the class
### Response:
def load_class_by_name(name: str):
"""Given a dotted path, returns the class"""
mod_path, _, cls_name = name.rpartition('.')
mod = importlib.import_module(mod_path)
cls = getattr(mod, cls_name)
return cls |
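A quick usage sketch for the helper above; the four-line function is repeated so the snippet runs on its own, and the dotted path is just an arbitrary standard-library example.

import importlib

def load_class_by_name(name: str):
    mod_path, _, cls_name = name.rpartition('.')
    return getattr(importlib.import_module(mod_path), cls_name)

# load collections.OrderedDict purely from its dotted path
cls = load_class_by_name('collections.OrderedDict')
print(cls.__name__)  # OrderedDict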
def export(self, filename='element.zip'):
"""
Export this element.
Usage::
engine = Engine('myfirewall')
extask = engine.export(filename='fooexport.zip')
while not extask.done():
extask.wait(3)
print("Finished download task: %s" % extask.message())
print("File downloaded to: %s" % extask.filename)
:param str filename: filename to store exported element
:raises TaskRunFailed: invalid permissions, invalid directory, or this
element is a system element and cannot be exported.
:return: DownloadTask
.. note:: It is not possible to export system elements
"""
from smc.administration.tasks import Task
return Task.download(self, 'export', filename) | Export this element.
Usage::
engine = Engine('myfirewall')
extask = engine.export(filename='fooexport.zip')
while not extask.done():
extask.wait(3)
print("Finished download task: %s" % extask.message())
print("File downloaded to: %s" % extask.filename)
:param str filename: filename to store exported element
:raises TaskRunFailed: invalid permissions, invalid directory, or this
element is a system element and cannot be exported.
:return: DownloadTask
.. note:: It is not possible to export system elements | Below is the the instruction that describes the task:
### Input:
Export this element.
Usage::
engine = Engine('myfirewall')
extask = engine.export(filename='fooexport.zip')
while not extask.done():
extask.wait(3)
print("Finished download task: %s" % extask.message())
print("File downloaded to: %s" % extask.filename)
:param str filename: filename to store exported element
:raises TaskRunFailed: invalid permissions, invalid directory, or this
element is a system element and cannot be exported.
:return: DownloadTask
.. note:: It is not possible to export system elements
### Response:
def export(self, filename='element.zip'):
"""
Export this element.
Usage::
engine = Engine('myfirewall')
extask = engine.export(filename='fooexport.zip')
while not extask.done():
extask.wait(3)
print("Finished download task: %s" % extask.message())
print("File downloaded to: %s" % extask.filename)
:param str filename: filename to store exported element
:raises TaskRunFailed: invalid permissions, invalid directory, or this
element is a system element and cannot be exported.
:return: DownloadTask
.. note:: It is not possible to export system elements
"""
from smc.administration.tasks import Task
return Task.download(self, 'export', filename) |
def order_limit_buy(self, timeInForce=TIME_IN_FORCE_GTC, **params):
"""Send in a new limit buy order
Any order with an icebergQty MUST have timeInForce set to GTC.
:param symbol: required
:type symbol: str
:param quantity: required
:type quantity: decimal
:param price: required
:type price: str
:param timeInForce: default Good till cancelled
:type timeInForce: str
:param newClientOrderId: A unique id for the order. Automatically generated if not sent.
:type newClientOrderId: str
:param stopPrice: Used with stop orders
:type stopPrice: decimal
:param icebergQty: Used with iceberg orders
:type icebergQty: decimal
:param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT.
:type newOrderRespType: str
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
See order endpoint for full response options
:raises: BinanceRequestException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
"""
params.update({
'side': self.SIDE_BUY,
})
return self.order_limit(timeInForce=timeInForce, **params) | Send in a new limit buy order
Any order with an icebergQty MUST have timeInForce set to GTC.
:param symbol: required
:type symbol: str
:param quantity: required
:type quantity: decimal
:param price: required
:type price: str
:param timeInForce: default Good till cancelled
:type timeInForce: str
:param newClientOrderId: A unique id for the order. Automatically generated if not sent.
:type newClientOrderId: str
:param stopPrice: Used with stop orders
:type stopPrice: decimal
:param icebergQty: Used with iceberg orders
:type icebergQty: decimal
:param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT.
:type newOrderRespType: str
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
See order endpoint for full response options
:raises: BinanceRequestException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException | Below is the the instruction that describes the task:
### Input:
Send in a new limit buy order
Any order with an icebergQty MUST have timeInForce set to GTC.
:param symbol: required
:type symbol: str
:param quantity: required
:type quantity: decimal
:param price: required
:type price: str
:param timeInForce: default Good till cancelled
:type timeInForce: str
:param newClientOrderId: A unique id for the order. Automatically generated if not sent.
:type newClientOrderId: str
:param stopPrice: Used with stop orders
:type stopPrice: decimal
:param icebergQty: Used with iceberg orders
:type icebergQty: decimal
:param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT.
:type newOrderRespType: str
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
See order endpoint for full response options
:raises: BinanceRequestException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
### Response:
def order_limit_buy(self, timeInForce=TIME_IN_FORCE_GTC, **params):
"""Send in a new limit buy order
Any order with an icebergQty MUST have timeInForce set to GTC.
:param symbol: required
:type symbol: str
:param quantity: required
:type quantity: decimal
:param price: required
:type price: str
:param timeInForce: default Good till cancelled
:type timeInForce: str
:param newClientOrderId: A unique id for the order. Automatically generated if not sent.
:type newClientOrderId: str
:param stopPrice: Used with stop orders
:type stopPrice: decimal
:param icebergQty: Used with iceberg orders
:type icebergQty: decimal
:param newOrderRespType: Set the response JSON. ACK, RESULT, or FULL; default: RESULT.
:type newOrderRespType: str
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
See order endpoint for full response options
:raises: BinanceRequestException, BinanceAPIException, BinanceOrderException, BinanceOrderMinAmountException, BinanceOrderMinPriceException, BinanceOrderMinTotalException, BinanceOrderUnknownSymbolException, BinanceOrderInactiveSymbolException
"""
params.update({
'side': self.SIDE_BUY,
})
return self.order_limit(timeInForce=timeInForce, **params) |
def get_process(cmd):
"""Get a command process."""
if sys.platform.startswith('win'):
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
process = subprocess.Popen(
cmd,
startupinfo=startupinfo,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
stdin=subprocess.PIPE,
shell=False
)
else:
process = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
stdin=subprocess.PIPE,
shell=False
)
return process | Get a command process. | Below is the the instruction that describes the task:
### Input:
Get a command process.
### Response:
def get_process(cmd):
"""Get a command process."""
if sys.platform.startswith('win'):
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
process = subprocess.Popen(
cmd,
startupinfo=startupinfo,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
stdin=subprocess.PIPE,
shell=False
)
else:
process = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
stdin=subprocess.PIPE,
shell=False
)
return process |
def bovy_print(fig_width=5,fig_height=5,axes_labelsize=16,
text_fontsize=11,legend_fontsize=12,
xtick_labelsize=10,ytick_labelsize=10,
xtick_minor_size=2,ytick_minor_size=2,
xtick_major_size=4,ytick_major_size=4):
"""
NAME:
bovy_print
PURPOSE:
setup a figure for plotting
INPUT:
fig_width - width in inches
fig_height - height in inches
axes_labelsize - size of the axis-labels
text_fontsize - font-size of the text (if any)
legend_fontsize - font-size of the legend (if any)
xtick_labelsize - size of the x-axis labels
ytick_labelsize - size of the y-axis labels
xtick_minor_size - size of the minor x-ticks
ytick_minor_size - size of the minor y-ticks
OUTPUT:
(none)
HISTORY:
2009-12-23 - Written - Bovy (NYU)
"""
fig_size = [fig_width,fig_height]
params = {'axes.labelsize': axes_labelsize,
'font.size': text_fontsize,
'legend.fontsize': legend_fontsize,
'xtick.labelsize':xtick_labelsize,
'ytick.labelsize':ytick_labelsize,
'text.usetex': True,
'figure.figsize': fig_size,
'xtick.major.size' : xtick_major_size,
'ytick.major.size' : ytick_major_size,
'xtick.minor.size' : xtick_minor_size,
'ytick.minor.size' : ytick_minor_size,
'legend.numpoints':1,
'xtick.top': True,
'xtick.direction': 'in',
'ytick.right': True,
'ytick.direction': 'in'}
pyplot.rcParams.update(params)
rc('text.latex', preamble=r'\usepackage{amsmath}'+'\n'
+r'\usepackage{amssymb}') | NAME:
bovy_print
PURPOSE:
setup a figure for plotting
INPUT:
fig_width - width in inches
fig_height - height in inches
axes_labelsize - size of the axis-labels
text_fontsize - font-size of the text (if any)
legend_fontsize - font-size of the legend (if any)
xtick_labelsize - size of the x-axis labels
ytick_labelsize - size of the y-axis labels
xtick_minor_size - size of the minor x-ticks
ytick_minor_size - size of the minor y-ticks
OUTPUT:
(none)
HISTORY:
2009-12-23 - Written - Bovy (NYU) | Below is the the instruction that describes the task:
### Input:
NAME:
bovy_print
PURPOSE:
setup a figure for plotting
INPUT:
fig_width - width in inches
fig_height - height in inches
axes_labelsize - size of the axis-labels
text_fontsize - font-size of the text (if any)
legend_fontsize - font-size of the legend (if any)
xtick_labelsize - size of the x-axis labels
ytick_labelsize - size of the y-axis labels
xtick_minor_size - size of the minor x-ticks
ytick_minor_size - size of the minor y-ticks
OUTPUT:
(none)
HISTORY:
2009-12-23 - Written - Bovy (NYU)
### Response:
def bovy_print(fig_width=5,fig_height=5,axes_labelsize=16,
text_fontsize=11,legend_fontsize=12,
xtick_labelsize=10,ytick_labelsize=10,
xtick_minor_size=2,ytick_minor_size=2,
xtick_major_size=4,ytick_major_size=4):
"""
NAME:
bovy_print
PURPOSE:
setup a figure for plotting
INPUT:
fig_width - width in inches
fig_height - height in inches
axes_labelsize - size of the axis-labels
text_fontsize - font-size of the text (if any)
legend_fontsize - font-size of the legend (if any)
xtick_labelsize - size of the x-axis labels
ytick_labelsize - size of the y-axis labels
xtick_minor_size - size of the minor x-ticks
ytick_minor_size - size of the minor y-ticks
OUTPUT:
(none)
HISTORY:
2009-12-23 - Written - Bovy (NYU)
"""
fig_size = [fig_width,fig_height]
params = {'axes.labelsize': axes_labelsize,
'font.size': text_fontsize,
'legend.fontsize': legend_fontsize,
'xtick.labelsize':xtick_labelsize,
'ytick.labelsize':ytick_labelsize,
'text.usetex': True,
'figure.figsize': fig_size,
'xtick.major.size' : xtick_major_size,
'ytick.major.size' : ytick_major_size,
'xtick.minor.size' : xtick_minor_size,
'ytick.minor.size' : ytick_minor_size,
'legend.numpoints':1,
'xtick.top': True,
'xtick.direction': 'in',
'ytick.right': True,
'ytick.direction': 'in'}
pyplot.rcParams.update(params)
rc('text.latex', preamble=r'\usepackage{amsmath}'+'\n'
+r'\usepackage{amssymb}') |
def save_base_map(filename, grouped_by_text):
"""Dump a list of agents along with groundings and counts into a csv file
Parameters
----------
filename : str
Filepath for output file
grouped_by_text : list of tuple
List of tuples of the form output by agent_texts_with_grounding
"""
rows = []
for group in grouped_by_text:
text_string = group[0]
for db, db_id, count in group[1]:
if db == 'UP':
name = uniprot_client.get_mnemonic(db_id)
else:
name = ''
row = [text_string, db, db_id, count, name]
rows.append(row)
write_unicode_csv(filename, rows, delimiter=',', quotechar='"',
quoting=csv.QUOTE_MINIMAL, lineterminator='\r\n') | Dump a list of agents along with groundings and counts into a csv file
Parameters
----------
filename : str
Filepath for output file
grouped_by_text : list of tuple
List of tuples of the form output by agent_texts_with_grounding | Below is the the instruction that describes the task:
### Input:
Dump a list of agents along with groundings and counts into a csv file
Parameters
----------
filename : str
Filepath for output file
grouped_by_text : list of tuple
List of tuples of the form output by agent_texts_with_grounding
### Response:
def save_base_map(filename, grouped_by_text):
"""Dump a list of agents along with groundings and counts into a csv file
Parameters
----------
filename : str
Filepath for output file
grouped_by_text : list of tuple
List of tuples of the form output by agent_texts_with_grounding
"""
rows = []
for group in grouped_by_text:
text_string = group[0]
for db, db_id, count in group[1]:
if db == 'UP':
name = uniprot_client.get_mnemonic(db_id)
else:
name = ''
row = [text_string, db, db_id, count, name]
rows.append(row)
write_unicode_csv(filename, rows, delimiter=',', quotechar='"',
quoting=csv.QUOTE_MINIMAL, lineterminator='\r\n') |
def cudaMemcpy_dtoh(dst, src, count):
"""
Copy memory from device to host.
Copy data from device memory to host memory.
Parameters
----------
dst : ctypes pointer
Host memory pointer.
src : ctypes pointer
Device memory pointer.
count : int
Number of bytes to copy.
"""
status = _libcudart.cudaMemcpy(dst, src,
ctypes.c_size_t(count),
cudaMemcpyDeviceToHost)
cudaCheckStatus(status) | Copy memory from device to host.
Copy data from device memory to host memory.
Parameters
----------
dst : ctypes pointer
Host memory pointer.
src : ctypes pointer
Device memory pointer.
count : int
Number of bytes to copy. | Below is the the instruction that describes the task:
### Input:
Copy memory from device to host.
Copy data from device memory to host memory.
Parameters
----------
dst : ctypes pointer
Host memory pointer.
src : ctypes pointer
Device memory pointer.
count : int
Number of bytes to copy.
### Response:
def cudaMemcpy_dtoh(dst, src, count):
"""
Copy memory from device to host.
Copy data from device memory to host memory.
Parameters
----------
dst : ctypes pointer
Host memory pointer.
src : ctypes pointer
Device memory pointer.
count : int
Number of bytes to copy.
"""
status = _libcudart.cudaMemcpy(dst, src,
ctypes.c_size_t(count),
cudaMemcpyDeviceToHost)
cudaCheckStatus(status) |
def run(self, func=None):
"""
Evaluates the packaged function as func(*self.args,**self.kwargs)
If func is a method of an object, it's accessed as getattr(self.obj,__name__).
If it's a user-defined function, it needs to be passed in here because it can't
be serialized.
Returns:
object: function's return value
"""
to_run = self.prepare_namespace(func)
result = to_run(*self.args, **self.kwargs)
return result | Evaluates the packaged function as func(*self.args,**self.kwargs)
If func is a method of an object, it's accessed as getattr(self.obj,__name__).
If it's a user-defined function, it needs to be passed in here because it can't
be serialized.
Returns:
object: function's return value | Below is the the instruction that describes the task:
### Input:
Evaluates the packaged function as func(*self.args,**self.kwargs)
If func is a method of an object, it's accessed as getattr(self.obj,__name__).
If it's a user-defined function, it needs to be passed in here because it can't
be serialized.
Returns:
object: function's return value
### Response:
def run(self, func=None):
"""
Evaluates the packaged function as func(*self.args,**self.kwargs)
If func is a method of an object, it's accessed as getattr(self.obj,__name__).
If it's a user-defined function, it needs to be passed in here because it can't
be serialized.
Returns:
object: function's return value
"""
to_run = self.prepare_namespace(func)
result = to_run(*self.args, **self.kwargs)
return result |
def add_ref(self, name, ref):
""" Add a reference for the backend object that gives access
to the low level context. Used in vispy.app.canvas.backends.
The given name must match with that of previously added
references.
"""
if self._name is None:
self._name = name
elif name != self._name:
raise RuntimeError('Contexts can only share between backends of '
'the same type')
self._refs.append(weakref.ref(ref)) | Add a reference for the backend object that gives access
to the low level context. Used in vispy.app.canvas.backends.
The given name must match with that of previously added
references. | Below is the the instruction that describes the task:
### Input:
Add a reference for the backend object that gives access
to the low level context. Used in vispy.app.canvas.backends.
The given name must match with that of previously added
references.
### Response:
def add_ref(self, name, ref):
""" Add a reference for the backend object that gives access
to the low level context. Used in vispy.app.canvas.backends.
The given name must match with that of previously added
references.
"""
if self._name is None:
self._name = name
elif name != self._name:
raise RuntimeError('Contexts can only share between backends of '
'the same type')
self._refs.append(weakref.ref(ref)) |
def _updateTransitionMatrix(self):
"""
Updates the hidden-state transition matrix and the initial distribution
"""
# TRANSITION MATRIX
C = self.model.count_matrix() + self.prior_C # posterior count matrix
# check if we work with these options
if self.reversible and not _tmatrix_disconnected.is_connected(C, strong=True):
raise NotImplementedError('Encountered disconnected count matrix with sampling option reversible:\n '
+ str(C) + '\nUse prior to ensure connectivity or use reversible=False.')
# ensure consistent sparsity pattern (P0 might have additional zeros because of underflows)
# TODO: these steps work around a bug in msmtools. Should be fixed there
P0 = msmest.transition_matrix(C, reversible=self.reversible, maxiter=10000, warn_not_converged=False)
zeros = np.where(P0 + P0.T == 0)
C[zeros] = 0
# run sampler
Tij = msmest.sample_tmatrix(C, nsample=1, nsteps=self.transition_matrix_sampling_steps,
reversible=self.reversible)
# INITIAL DISTRIBUTION
if self.stationary: # p0 is consistent with P
p0 = _tmatrix_disconnected.stationary_distribution(Tij, C=C)
else:
n0 = self.model.count_init().astype(float)
first_timestep_counts_with_prior = n0 + self.prior_n0
positive = first_timestep_counts_with_prior > 0
p0 = np.zeros_like(n0)
p0[positive] = np.random.dirichlet(first_timestep_counts_with_prior[positive]) # sample p0 from posterior
# update HMM with new sample
self.model.update(p0, Tij) | Updates the hidden-state transition matrix and the initial distribution | Below is the the instruction that describes the task:
### Input:
Updates the hidden-state transition matrix and the initial distribution
### Response:
def _updateTransitionMatrix(self):
"""
Updates the hidden-state transition matrix and the initial distribution
"""
# TRANSITION MATRIX
C = self.model.count_matrix() + self.prior_C # posterior count matrix
# check if we work with these options
if self.reversible and not _tmatrix_disconnected.is_connected(C, strong=True):
raise NotImplementedError('Encountered disconnected count matrix with sampling option reversible:\n '
+ str(C) + '\nUse prior to ensure connectivity or use reversible=False.')
# ensure consistent sparsity pattern (P0 might have additional zeros because of underflows)
# TODO: these steps work around a bug in msmtools. Should be fixed there
P0 = msmest.transition_matrix(C, reversible=self.reversible, maxiter=10000, warn_not_converged=False)
zeros = np.where(P0 + P0.T == 0)
C[zeros] = 0
# run sampler
Tij = msmest.sample_tmatrix(C, nsample=1, nsteps=self.transition_matrix_sampling_steps,
reversible=self.reversible)
# INITIAL DISTRIBUTION
if self.stationary: # p0 is consistent with P
p0 = _tmatrix_disconnected.stationary_distribution(Tij, C=C)
else:
n0 = self.model.count_init().astype(float)
first_timestep_counts_with_prior = n0 + self.prior_n0
positive = first_timestep_counts_with_prior > 0
p0 = np.zeros_like(n0)
p0[positive] = np.random.dirichlet(first_timestep_counts_with_prior[positive]) # sample p0 from posterior
# update HMM with new sample
self.model.update(p0, Tij) |
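A numpy-only sketch of the non-stationary branch above, i.e. drawing the initial distribution from its Dirichlet posterior; the counts and the zero prior are made-up values rather than output of the estimator.

import numpy as np

n0 = np.array([5.0, 0.0, 2.0])   # hypothetical first-timestep counts
prior_n0 = np.zeros(3)           # hypothetical prior pseudo-counts (none here)
counts = n0 + prior_n0
positive = counts > 0            # state 1 has no evidence and keeps zero mass
p0 = np.zeros_like(counts)
p0[positive] = np.random.dirichlet(counts[positive])
print(p0, p0.sum())              # the sampled entries sum to 1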
def convertAsOpenMath(term, converter):
""" Converts a term into OpenMath, using either a converter or the interpretAsOpenMath method """
# if we already have openmath, or have some of our magic helpers, use interpretAsOpenMath
if hasattr(term, "_ishelper") and term._ishelper or isinstance(term, om.OMAny):
return interpretAsOpenMath(term)
# next try to convert using the converter
if converter is not None:
try:
_converted = converter.to_openmath(term)
except Exception as e:
_converted = None
if isinstance(_converted, om.OMAny):
return _converted
# fallback to the openmath helper
return interpretAsOpenMath(term) | Converts a term into OpenMath, using either a converter or the interpretAsOpenMath method | Below is the the instruction that describes the task:
### Input:
Converts a term into OpenMath, using either a converter or the interpretAsOpenMath method
### Response:
def convertAsOpenMath(term, converter):
""" Converts a term into OpenMath, using either a converter or the interpretAsOpenMath method """
# if we already have openmath, or have some of our magic helpers, use interpretAsOpenMath
if hasattr(term, "_ishelper") and term._ishelper or isinstance(term, om.OMAny):
return interpretAsOpenMath(term)
# next try to convert using the converter
if converter is not None:
try:
_converted = converter.to_openmath(term)
except Exception as e:
_converted = None
if isinstance(_converted, om.OMAny):
return _converted
# fallback to the openmath helper
return interpretAsOpenMath(term) |
def fromdict(dict):
"""Takes a dictionary as an argument and returns a new Challenge
object from the dictionary.
:param dict: the dictionary to convert
"""
seed = hb_decode(dict['seed'])
index = dict['index']
return Challenge(seed, index) | Takes a dictionary as an argument and returns a new Challenge
object from the dictionary.
:param dict: the dictionary to convert | Below is the the instruction that describes the task:
### Input:
Takes a dictionary as an argument and returns a new Challenge
object from the dictionary.
:param dict: the dictionary to convert
### Response:
def fromdict(dict):
"""Takes a dictionary as an argument and returns a new Challenge
object from the dictionary.
:param dict: the dictionary to convert
"""
seed = hb_decode(dict['seed'])
index = dict['index']
return Challenge(seed, index) |
def get_client_data(self, client):
"""
Returns a client.
"""
data = self._request('GET', '/clients/{}'.format(client))
return data.json() | Returns a client. | Below is the the instruction that describes the task:
### Input:
Returns a client.
### Response:
def get_client_data(self, client):
"""
Returns a client.
"""
data = self._request('GET', '/clients/{}'.format(client))
return data.json() |
def add_entry(self, length):
# type: (int) -> int
'''
Add a new entry to this Rock Ridge Continuation Block. This method
attempts to find a gap that fits the new length anywhere within this
Continuation Block. If successful, it returns the offset at which
it placed this entry. If unsuccessful, it returns None.
Parameters:
length - The length of the entry to find a gap for.
Returns:
The offset the entry was placed at, or None if no gap was found.
'''
offset = -1
# Need to find a gap
for index, entry in enumerate(self._entries):
if index == 0:
if entry.offset != 0 and length <= entry.offset:
# We can put it at the beginning!
offset = 0
break
else:
lastentry = self._entries[index - 1]
lastend = lastentry.offset + lastentry.length - 1
gapsize = entry.offset - lastend - 1
if gapsize >= length:
# We found a spot for it!
offset = lastend + 1
break
else:
# We reached the end without finding a gap for it. Look at the last
# entry and see if there is room at the end.
if self._entries:
lastentry = self._entries[-1]
lastend = lastentry.offset + lastentry.length - 1
left = self._max_block_size - lastend - 1
if left >= length:
offset = lastend + 1
else:
if self._max_block_size >= length:
offset = 0
if offset >= 0:
bisect.insort_left(self._entries,
RockRidgeContinuationEntry(offset, length))
return offset | Add a new entry to this Rock Ridge Continuation Block. This method
attempts to find a gap that fits the new length anywhere within this
Continuation Block. If successful, it returns the offset at which
it placed this entry. If unsuccessful, it returns None.
Parameters:
length - The length of the entry to find a gap for.
Returns:
The offset the entry was placed at, or None if no gap was found. | Below is the the instruction that describes the task:
### Input:
Add a new entry to this Rock Ridge Continuation Block. This method
attempts to find a gap that fits the new length anywhere within this
Continuation Block. If successful, it returns the offset at which
it placed this entry. If unsuccessful, it returns None.
Parameters:
length - The length of the entry to find a gap for.
Returns:
The offset the entry was placed at, or None if no gap was found.
### Response:
def add_entry(self, length):
# type: (int) -> int
'''
Add a new entry to this Rock Ridge Continuation Block. This method
attempts to find a gap that fits the new length anywhere within this
Continuation Block. If successful, it returns the offset at which
it placed this entry. If unsuccessful, it returns None.
Parameters:
length - The length of the entry to find a gap for.
Returns:
The offset the entry was placed at, or None if no gap was found.
'''
offset = -1
# Need to find a gap
for index, entry in enumerate(self._entries):
if index == 0:
if entry.offset != 0 and length <= entry.offset:
# We can put it at the beginning!
offset = 0
break
else:
lastentry = self._entries[index - 1]
lastend = lastentry.offset + lastentry.length - 1
gapsize = entry.offset - lastend - 1
if gapsize >= length:
# We found a spot for it!
offset = lastend + 1
break
else:
# We reached the end without finding a gap for it. Look at the last
# entry and see if there is room at the end.
if self._entries:
lastentry = self._entries[-1]
lastend = lastentry.offset + lastentry.length - 1
left = self._max_block_size - lastend - 1
if left >= length:
offset = lastend + 1
else:
if self._max_block_size >= length:
offset = 0
if offset >= 0:
bisect.insort_left(self._entries,
RockRidgeContinuationEntry(offset, length))
return offset |
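A standalone re-implementation of the same first-fit gap search over plain (offset, length) tuples, kept outside the class and run on made-up numbers so the scan is easier to follow.

import bisect

def find_gap(entries, length, max_block_size):
    # entries: sorted list of (offset, length) tuples; returns the chosen
    # offset, or -1 if no gap of the requested length exists
    offset = -1
    for index, (e_off, e_len) in enumerate(entries):
        if index == 0:
            if e_off != 0 and length <= e_off:
                offset = 0                      # room before the first entry
                break
        else:
            last_off, last_len = entries[index - 1]
            lastend = last_off + last_len - 1
            if e_off - lastend - 1 >= length:
                offset = lastend + 1            # gap between neighbours fits
                break
    else:
        if entries:                             # no break: try the tail of the block
            last_off, last_len = entries[-1]
            lastend = last_off + last_len - 1
            if max_block_size - lastend - 1 >= length:
                offset = lastend + 1
        elif max_block_size >= length:          # empty block: start at offset 0
            offset = 0
    if offset >= 0:
        bisect.insort_left(entries, (offset, length))
    return offset

entries = [(0, 10), (30, 5)]                    # made-up existing entries
print(find_gap(entries, 8, 64), entries)        # 10 [(0, 10), (10, 8), (30, 5)]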
def db_putHex(self, db_name, key, value):
"""https://github.com/ethereum/wiki/wiki/JSON-RPC#db_puthex
DEPRECATED
"""
warnings.warn('deprecated', DeprecationWarning)
if not value.startswith('0x'):
value = add_0x(value)
return (yield from self.rpc_call('db_putHex', [db_name, key, value])) | https://github.com/ethereum/wiki/wiki/JSON-RPC#db_puthex
DEPRECATED | Below is the the instruction that describes the task:
### Input:
https://github.com/ethereum/wiki/wiki/JSON-RPC#db_puthex
DEPRECATED
### Response:
def db_putHex(self, db_name, key, value):
"""https://github.com/ethereum/wiki/wiki/JSON-RPC#db_puthex
DEPRECATED
"""
warnings.warn('deprecated', DeprecationWarning)
if not value.startswith('0x'):
value = add_0x(value)
return (yield from self.rpc_call('db_putHex', [db_name, key, value])) |
def visit_comprehension(self, node, parent):
"""visit a Comprehension node by returning a fresh instance of it"""
newnode = nodes.Comprehension(parent)
newnode.postinit(
self.visit(node.target, newnode),
self.visit(node.iter, newnode),
[self.visit(child, newnode) for child in node.ifs],
getattr(node, "is_async", None),
)
return newnode | visit a Comprehension node by returning a fresh instance of it | Below is the the instruction that describes the task:
### Input:
visit a Comprehension node by returning a fresh instance of it
### Response:
def visit_comprehension(self, node, parent):
"""visit a Comprehension node by returning a fresh instance of it"""
newnode = nodes.Comprehension(parent)
newnode.postinit(
self.visit(node.target, newnode),
self.visit(node.iter, newnode),
[self.visit(child, newnode) for child in node.ifs],
getattr(node, "is_async", None),
)
return newnode |
def check_undelivered(to=None):
"""Sends a notification email if any undelivered dispatches.
Returns undelivered (failed) dispatches count.
:param str|unicode to: Recipient address. If not set Django ADMINS setting is used.
:rtype: int
"""
failed_count = Dispatch.objects.filter(dispatch_status=Dispatch.DISPATCH_STATUS_FAILED).count()
if failed_count:
from sitemessage.shortcuts import schedule_email
from sitemessage.messages.email import EmailTextMessage
if to is None:
admins = settings.ADMINS
if admins:
to = list(dict(admins).values())
if to:
priority = 999
register_message_types(EmailTextMessage)
schedule_email(
'You have %s undelivered dispatch(es) at %s' % (failed_count, get_site_url()),
subject='[SITEMESSAGE] Undelivered dispatches',
to=to, priority=priority)
send_scheduled_messages(priority=priority)
return failed_count | Sends a notification email if any undelivered dispatches.
Returns undelivered (failed) dispatches count.
:param str|unicode to: Recipient address. If not set Django ADMINS setting is used.
:rtype: int | Below is the the instruction that describes the task:
### Input:
Sends a notification email if any undelivered dispatches.
Returns undelivered (failed) dispatches count.
:param str|unicode to: Recipient address. If not set Django ADMINS setting is used.
:rtype: int
### Response:
def check_undelivered(to=None):
"""Sends a notification email if any undelivered dispatches.
Returns undelivered (failed) dispatches count.
:param str|unicode to: Recipient address. If not set Django ADMINS setting is used.
:rtype: int
"""
failed_count = Dispatch.objects.filter(dispatch_status=Dispatch.DISPATCH_STATUS_FAILED).count()
if failed_count:
from sitemessage.shortcuts import schedule_email
from sitemessage.messages.email import EmailTextMessage
if to is None:
admins = settings.ADMINS
if admins:
to = list(dict(admins).values())
if to:
priority = 999
register_message_types(EmailTextMessage)
schedule_email(
'You have %s undelivered dispatch(es) at %s' % (failed_count, get_site_url()),
subject='[SITEMESSAGE] Undelivered dispatches',
to=to, priority=priority)
send_scheduled_messages(priority=priority)
return failed_count |
def runSearchReads(self, request):
"""
Runs the specified SearchReadsRequest.
"""
return self.runSearchRequest(
request, protocol.SearchReadsRequest,
protocol.SearchReadsResponse,
self.readsGenerator) | Runs the specified SearchReadsRequest. | Below is the the instruction that describes the task:
### Input:
Runs the specified SearchReadsRequest.
### Response:
def runSearchReads(self, request):
"""
Runs the specified SearchReadsRequest.
"""
return self.runSearchRequest(
request, protocol.SearchReadsRequest,
protocol.SearchReadsResponse,
self.readsGenerator) |
def g_voigt(self):
"""
returns the G_v shear modulus
"""
return (2. * self.voigt[:3, :3].trace() -
np.triu(self.voigt[:3, :3]).sum() +
3 * self.voigt[3:, 3:].trace()) / 15. | returns the G_v shear modulus | Below is the the instruction that describes the task:
### Input:
returns the G_v shear modulus
### Response:
def g_voigt(self):
"""
returns the G_v shear modulus
"""
return (2. * self.voigt[:3, :3].trace() -
np.triu(self.voigt[:3, :3]).sum() +
3 * self.voigt[3:, 3:].trace()) / 15. |
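A worked numpy check of the same expression on a hypothetical isotropic stiffness matrix (Lamé parameters lam=2, mu=1 in arbitrary units), for which the Voigt average must reduce to the shear modulus mu.

import numpy as np

lam, mu = 2.0, 1.0                      # hypothetical Lamé parameters
C = np.zeros((6, 6))
C[:3, :3] = lam
C[:3, :3] += 2.0 * mu * np.eye(3)       # C11 = C22 = C33 = lam + 2*mu
C[3:, 3:] = mu * np.eye(3)              # C44 = C55 = C66 = mu
g_v = (2.0 * C[:3, :3].trace()
       - np.triu(C[:3, :3]).sum()
       + 3.0 * C[3:, 3:].trace()) / 15.0
print(g_v)                              # 1.0, i.e. the shear modulus mu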
def cnst_A1(self, X, Xf=None):
r"""Compute :math:`A_1 \mathbf{x}` component of ADMM problem
constraint. In this case :math:`A_1 \mathbf{x} = (\Gamma_0^T \;\;
\Gamma_1^T \;\; \ldots )^T \mathbf{x}`.
"""
if Xf is None:
Xf = sl.rfftn(X, axes=self.cri.axisN)
return sl.irfftn(sl.inner(
self.GDf, Xf[..., np.newaxis], axis=self.cri.axisM), self.cri.Nv,
self.cri.axisN) | r"""Compute :math:`A_1 \mathbf{x}` component of ADMM problem
constraint. In this case :math:`A_1 \mathbf{x} = (\Gamma_0^T \;\;
\Gamma_1^T \;\; \ldots )^T \mathbf{x}`. | Below is the the instruction that describes the task:
### Input:
r"""Compute :math:`A_1 \mathbf{x}` component of ADMM problem
constraint. In this case :math:`A_1 \mathbf{x} = (\Gamma_0^T \;\;
\Gamma_1^T \;\; \ldots )^T \mathbf{x}`.
### Response:
def cnst_A1(self, X, Xf=None):
r"""Compute :math:`A_1 \mathbf{x}` component of ADMM problem
constraint. In this case :math:`A_1 \mathbf{x} = (\Gamma_0^T \;\;
\Gamma_1^T \;\; \ldots )^T \mathbf{x}`.
"""
if Xf is None:
Xf = sl.rfftn(X, axes=self.cri.axisN)
return sl.irfftn(sl.inner(
self.GDf, Xf[..., np.newaxis], axis=self.cri.axisM), self.cri.Nv,
self.cri.axisN) |
def _create_cipher(self, password, salt, IV):
"""
Create the cipher object to encrypt or decrypt a payload.
"""
from Crypto.Protocol.KDF import PBKDF2
from Crypto.Cipher import AES
pw = PBKDF2(password, salt, dkLen=self.block_size)
return AES.new(pw[:self.block_size], AES.MODE_CFB, IV) | Create the cipher object to encrypt or decrypt a payload. | Below is the the instruction that describes the task:
### Input:
Create the cipher object to encrypt or decrypt a payload.
### Response:
def _create_cipher(self, password, salt, IV):
"""
Create the cipher object to encrypt or decrypt a payload.
"""
from Crypto.Protocol.KDF import PBKDF2
from Crypto.Cipher import AES
pw = PBKDF2(password, salt, dkLen=self.block_size)
return AES.new(pw[:self.block_size], AES.MODE_CFB, IV) |
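A roundtrip sketch of the same construction, assuming the PyCryptodome implementation of the Crypto package; the password, salt and IV are throwaway values chosen only for illustration.

import os
from Crypto.Protocol.KDF import PBKDF2
from Crypto.Cipher import AES

block_size = 16                                   # AES block size in bytes
password, salt, iv = b'hunter2', os.urandom(16), os.urandom(16)   # throwaway values
key = PBKDF2(password, salt, dkLen=block_size)    # derive the AES key from the password
encryptor = AES.new(key[:block_size], AES.MODE_CFB, iv)
decryptor = AES.new(key[:block_size], AES.MODE_CFB, iv)
ciphertext = encryptor.encrypt(b'secret payload')
assert decryptor.decrypt(ciphertext) == b'secret payload'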
def find_project_by_short_name(short_name, pbclient, all=None):
"""Return project by short_name."""
try:
response = pbclient.find_project(short_name=short_name, all=all)
check_api_error(response)
if (len(response) == 0):
msg = '%s not found! You can use the all=1 argument to \
search in all the server.'
error = 'Project Not Found'
raise ProjectNotFound(msg, error)
return response[0]
except exceptions.ConnectionError:
raise
except ProjectNotFound:
raise | Return project by short_name. | Below is the the instruction that describes the task:
### Input:
Return project by short_name.
### Response:
def find_project_by_short_name(short_name, pbclient, all=None):
"""Return project by short_name."""
try:
response = pbclient.find_project(short_name=short_name, all=all)
check_api_error(response)
if (len(response) == 0):
msg = '%s not found! You can use the all=1 argument to \
search in all the server.'
error = 'Project Not Found'
raise ProjectNotFound(msg, error)
return response[0]
except exceptions.ConnectionError:
raise
except ProjectNotFound:
raise |
def download_attachments(self):
""" Downloads this message attachments into memory.
Need a call to 'attachment.save' to save them on disk.
:return: Success / Failure
:rtype: bool
"""
if not self._parent.has_attachments:
log.debug(
'Parent {} has no attachments, skipping out early.'.format(
self._parent.__class__.__name__))
return False
if not self._parent.object_id:
raise RuntimeError(
'Attempted to download attachments of an unsaved {}'.format(
self._parent.__class__.__name__))
url = self.build_url(self._endpoints.get('attachments').format(
id=self._parent.object_id))
response = self._parent.con.get(url)
if not response:
return False
attachments = response.json().get('value', [])
# Everything received from cloud must be passed as self._cloud_data_key
self.untrack = True
self.add({self._cloud_data_key: attachments})
self.untrack = False
# TODO: when it's a item attachment the attachment itself
# is not downloaded. We must download it...
# TODO: idea: retrieve the attachments ids' only with
# select and then download one by one.
return True | Downloads this message attachments into memory.
Need a call to 'attachment.save' to save them on disk.
:return: Success / Failure
:rtype: bool | Below is the the instruction that describes the task:
### Input:
Downloads this message attachments into memory.
Need a call to 'attachment.save' to save them on disk.
:return: Success / Failure
:rtype: bool
### Response:
def download_attachments(self):
""" Downloads this message attachments into memory.
Need a call to 'attachment.save' to save them on disk.
:return: Success / Failure
:rtype: bool
"""
if not self._parent.has_attachments:
log.debug(
'Parent {} has no attachments, skipping out early.'.format(
self._parent.__class__.__name__))
return False
if not self._parent.object_id:
raise RuntimeError(
'Attempted to download attachments of an unsaved {}'.format(
self._parent.__class__.__name__))
url = self.build_url(self._endpoints.get('attachments').format(
id=self._parent.object_id))
response = self._parent.con.get(url)
if not response:
return False
attachments = response.json().get('value', [])
# Everything received from cloud must be passed as self._cloud_data_key
self.untrack = True
self.add({self._cloud_data_key: attachments})
self.untrack = False
# TODO: when it's a item attachment the attachment itself
# is not downloaded. We must download it...
# TODO: idea: retrieve the attachments ids' only with
# select and then download one by one.
return True |
def new_ele_description(**kwargs):
'''
from elist.elist import *
from elist.jprint import pobj
root_desc = new_ele_description(leaf=False,depth=0,breadth_path=[],path=[],parent_path=[],parent_breadth_path=[])
pobj(root_desc)
#None means not handled
'''
desc = {
'leaf':None,
'depth':None,
'breadth':None,
'breadth_path':None,
'sib_seq':None,
'path':None,
'parent_path':None,
'parent_breadth_path':None,
'lsib_path':None,
'rsib_path':None,
'lcin_path':None,
'rcin_path':None,
'sons_count':None,
'leaf_son_paths':None,
'non_leaf_son_paths':None,
'leaf_descendant_paths':None,
'non_leaf_descendant_paths':None,
'flat_offset':None,
'flat_len':None
}
for key in kwargs:
desc[key.lower()] = kwargs[key]
return(desc) | from elist.elist import *
from elist.jprint import pobj
root_desc = new_ele_description(leaf=False,depth=0,breadth_path=[],path=[],parent_path=[],parent_breadth_path=[])
pobj(root_desc)
#None means not handled | Below is the the instruction that describes the task:
### Input:
from elist.elist import *
from elist.jprint import pobj
root_desc = new_ele_description(leaf=False,depth=0,breadth_path=[],path=[],parent_path=[],parent_breadth_path=[])
pobj(root_desc)
#None means not handled
### Response:
def new_ele_description(**kwargs):
'''
from elist.elist import *
from elist.jprint import pobj
root_desc = new_ele_description(leaf=False,depth=0,breadth_path=[],path=[],parent_path=[],parent_breadth_path=[])
pobj(root_desc)
#None means not handled
'''
desc = {
'leaf':None,
'depth':None,
'breadth':None,
'breadth_path':None,
'sib_seq':None,
'path':None,
'parent_path':None,
'parent_breadth_path':None,
'lsib_path':None,
'rsib_path':None,
'lcin_path':None,
'rcin_path':None,
'sons_count':None,
'leaf_son_paths':None,
'non_leaf_son_paths':None,
'leaf_descendant_paths':None,
'non_leaf_descendant_paths':None,
'flat_offset':None,
'flat_len':None
}
for key in kwargs:
desc[key.lower()] = kwargs[key]
return(desc) |
def delete_marked_communities():
"""Delete communities after holdout time."""
# TODO: Delete the community ID from all records metadata first
raise NotImplementedError()
Community.query.filter_by(
Community.delete_time > datetime.utcnow()).delete()
db.session.commit() | Delete communities after holdout time. | Below is the the instruction that describes the task:
### Input:
Delete communities after holdout time.
### Response:
def delete_marked_communities():
"""Delete communities after holdout time."""
# TODO: Delete the community ID from all records metadata first
raise NotImplementedError()
Community.query.filter_by(
Community.delete_time > datetime.utcnow()).delete()
db.session.commit() |
def error_for(response):
"""Return the appropriate initialized exception class for a response."""
klass = error_classes.get(response.status)
if klass is None:
if 400 <= response.status < 500:
klass = ClientError
if 500 <= response.status < 600:
klass = ServerError # pragma: no cover
return klass(response) | Return the appropriate initialized exception class for a response. | Below is the the instruction that describes the task:
### Input:
Return the appropriate initialized exception class for a response.
### Response:
def error_for(response):
"""Return the appropriate initialized exception class for a response."""
klass = error_classes.get(response.status)
if klass is None:
if 400 <= response.status < 500:
klass = ClientError
if 500 <= response.status < 600:
klass = ServerError # pragma: no cover
return klass(response) |
def parse(table, query=None, date=None, fields=None,
distinct=False, limit=None, alias=None):
'''
Given a SQLAlchemy Table() instance, generate a SQLAlchemy
Query() instance with the given parameters.
:param table: SQLAlchemy Table() instance
:param query: MQL query
:param date: metrique date range query
:param date: metrique date range query element
:param fields: list of field names to return as columns
:param distinct: apply DISTINCT to this query
:param limit: apply LIMIT to this query
:param alias: apply ALIAS AS to this query
'''
date = date_range(date)
limit = int(limit or -1)
if query and date:
query = '%s and %s' % (query, date)
elif date:
query = date
elif query:
pass
else: # date is null, query is not
query = None
fields = parse_fields(fields=fields) or None
# we must pass in the table column objects themselves to ensure
# our bind / result processors are mapped properly
fields = fields if fields else table.columns
msg = 'parse(query=%s, fields=%s)' % (query, fields)
#msg = re.sub(' in \[[^\]]+\]', ' in [...]', msg)
logger.debug(msg)
kwargs = {}
if query:
interpreter = MQLInterpreter(table)
query = interpreter.parse(query)
kwargs['whereclause'] = query
if distinct:
kwargs['distinct'] = distinct
query = select(fields, from_obj=table, **kwargs)
if limit >= 1:
query = query.limit(limit)
if alias:
query = query.alias(alias)
return query | Given a SQLAlchemy Table() instance, generate a SQLAlchemy
Query() instance with the given parameters.
:param table: SQLAlchemy Table() instance
:param query: MQL query
:param date: metrique date range query
:param date: metrique date range query element
:param fields: list of field names to return as columns
:param distinct: apply DISTINCT to this query
:param limit: apply LIMIT to this query
:param alias: apply ALIAS AS to this query | Below is the the instruction that describes the task:
### Input:
Given a SQLAlchemy Table() instance, generate a SQLAlchemy
Query() instance with the given parameters.
:param table: SQLAlchemy Table() instance
:param query: MQL query
:param date: metrique date range query
:param date: metrique date range query element
:param fields: list of field names to return as columns
:param distinct: apply DISTINCT to this query
:param limit: apply LIMIT to this query
:param alias: apply ALIAS AS to this query
### Response:
def parse(table, query=None, date=None, fields=None,
distinct=False, limit=None, alias=None):
'''
Given a SQLAlchemy Table() instance, generate a SQLAlchemy
Query() instance with the given parameters.
:param table: SQLAlchemy Table() instance
:param query: MQL query
:param date: metrique date range query
:param date: metrique date range query element
:param fields: list of field names to return as columns
:param distinct: apply DISTINCT to this query
:param limit: apply LIMIT to this query
:param alias: apply ALIAS AS to this query
'''
date = date_range(date)
limit = int(limit or -1)
if query and date:
query = '%s and %s' % (query, date)
elif date:
query = date
elif query:
pass
else: # date is null, query is not
query = None
fields = parse_fields(fields=fields) or None
# we must pass in the table column objects themselves to ensure
# our bind / result processors are mapped properly
fields = fields if fields else table.columns
msg = 'parse(query=%s, fields=%s)' % (query, fields)
#msg = re.sub(' in \[[^\]]+\]', ' in [...]', msg)
logger.debug(msg)
kwargs = {}
if query:
interpreter = MQLInterpreter(table)
query = interpreter.parse(query)
kwargs['whereclause'] = query
if distinct:
kwargs['distinct'] = distinct
query = select(fields, from_obj=table, **kwargs)
if limit >= 1:
query = query.limit(limit)
if alias:
query = query.alias(alias)
return query |
def ipython(args):
'''give the user an ipython shell, optionally with an endpoint of choice.
'''
# The client will announce itself (backend/database) unless it's get
from sregistry.main import get_client
client = get_client(args.endpoint)
client.announce(args.command)
from IPython import embed
embed() | give the user an ipython shell, optionally with an endpoint of choice. | Below is the the instruction that describes the task:
### Input:
give the user an ipython shell, optionally with an endpoint of choice.
### Response:
def ipython(args):
'''give the user an ipython shell, optionally with an endpoint of choice.
'''
# The client will announce itself (backend/database) unless it's get
from sregistry.main import get_client
client = get_client(args.endpoint)
client.announce(args.command)
from IPython import embed
embed() |
def to_json(self, filename=None,
encoding="utf-8", errors="strict",
multiline=False, **json_kwargs):
"""
Transform the BoxList object into a JSON string.
:param filename: If provided will save to file
:param encoding: File encoding
:param errors: How to handle encoding errors
:param multiline: Put each item in list onto it's own line
:param json_kwargs: additional arguments to pass to json.dump(s)
:return: string of JSON or return of `json.dump`
"""
if filename and multiline:
lines = [_to_json(item, filename=False, encoding=encoding,
errors=errors, **json_kwargs) for item in self]
with open(filename, 'w', encoding=encoding, errors=errors) as f:
f.write("\n".join(lines).decode('utf-8') if
sys.version_info < (3, 0) else "\n".join(lines))
else:
return _to_json(self.to_list(), filename=filename,
encoding=encoding, errors=errors, **json_kwargs) | Transform the BoxList object into a JSON string.
:param filename: If provided will save to file
:param encoding: File encoding
:param errors: How to handle encoding errors
:param multiline: Put each item in list onto it's own line
:param json_kwargs: additional arguments to pass to json.dump(s)
:return: string of JSON or return of `json.dump` | Below is the the instruction that describes the task:
### Input:
Transform the BoxList object into a JSON string.
:param filename: If provided will save to file
:param encoding: File encoding
:param errors: How to handle encoding errors
:param multiline: Put each item in list onto it's own line
:param json_kwargs: additional arguments to pass to json.dump(s)
:return: string of JSON or return of `json.dump`
### Response:
def to_json(self, filename=None,
encoding="utf-8", errors="strict",
multiline=False, **json_kwargs):
"""
Transform the BoxList object into a JSON string.
:param filename: If provided will save to file
:param encoding: File encoding
:param errors: How to handle encoding errors
:param multiline: Put each item in list onto it's own line
:param json_kwargs: additional arguments to pass to json.dump(s)
:return: string of JSON or return of `json.dump`
"""
if filename and multiline:
lines = [_to_json(item, filename=False, encoding=encoding,
errors=errors, **json_kwargs) for item in self]
with open(filename, 'w', encoding=encoding, errors=errors) as f:
f.write("\n".join(lines).decode('utf-8') if
sys.version_info < (3, 0) else "\n".join(lines))
else:
return _to_json(self.to_list(), filename=filename,
encoding=encoding, errors=errors, **json_kwargs) |
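A standalone sketch of what the multiline branch produces (one JSON document per line), using plain dicts and the standard json module instead of Box objects.

import json

items = [{'name': 'a', 'value': 1}, {'name': 'b', 'value': 2}]   # stand-in for a BoxList
with open('items.jsonl', 'w', encoding='utf-8') as f:
    f.write('\n'.join(json.dumps(item) for item in items))       # one JSON object per line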
def beeswarm(*args, **kwargs):
"""
Create a R-like beeswarm plot showing the mean and datapoints.
The difference from matplotlib is only the left axis line is
shown, and ticklabels labeling each category of data can be added.
@param ax:
@param x:
@param kwargs: Besides xticklabels, which is a prettyplotlib-specific
argument which will label each individual beeswarm, many arguments for
matplotlib.pyplot.boxplot will be accepted:
http://matplotlib.org/api/axes_api.html#matplotlib.axes.Axes.boxplot
Additional arguments include:
*median_color* : (default gray)
The color of median lines
*median_width* : (default 2)
Median line width
*colors* : (default None)
Colors to use when painting a dataseries, for example
list1 = [1,2,3]
list2 = [5,6,7]
ppl.beeswarm([list1, list2], colors=["red", "blue"], xticklabels=["data1", "data2"])
@return:
"""
ax, args, kwargs = maybe_get_ax(*args, **kwargs)
# If no ticklabels are specified, don't draw any
xticklabels = kwargs.pop('xticklabels', None)
colors = kwargs.pop('colors', None)
fontsize = kwargs.pop('fontsize', 10)
gray = _colors.set1[8]
red = _colors.set1[0]
blue = kwargs.pop('color', _colors.set1[1])
kwargs.setdefault('widths', 0.25)
kwargs.setdefault('sym', "o")
bp = _beeswarm(ax, *args, **kwargs)
kwargs.setdefault("median_color", gray)
kwargs.setdefault("median_linewidth", 2)
if xticklabels:
ax.xaxis.set_ticklabels(xticklabels, fontsize=fontsize)
show_caps = kwargs.pop('show_caps', True)
show_ticks = kwargs.pop('show_ticks', False)
remove_chartjunk(ax, ['top', 'right', 'bottom'], show_ticks=show_ticks)
linewidth = 0.75
plt.setp(bp['boxes'], color=blue, linewidth=linewidth)
plt.setp(bp['medians'], color=kwargs.pop("median_color"), linewidth=kwargs.pop("median_linewidth"))
#plt.setp(bp['whiskers'], color=blue, linestyle='solid',
# linewidth=linewidth)
for color, flier in zip(colors, bp['fliers']):
plt.setp(flier, color=color)
#if show_caps:
# plt.setp(bp['caps'], color=blue, linewidth=linewidth)
#else:
# plt.setp(bp['caps'], color='none')
ax.spines['left']._linewidth = 0.5
return bp | Create a R-like beeswarm plot showing the mean and datapoints.
The difference from matplotlib is only the left axis line is
shown, and ticklabels labeling each category of data can be added.
@param ax:
@param x:
@param kwargs: Besides xticklabels, which is a prettyplotlib-specific
argument which will label each individual beeswarm, many arguments for
matplotlib.pyplot.boxplot will be accepted:
http://matplotlib.org/api/axes_api.html#matplotlib.axes.Axes.boxplot
Additional arguments include:
*median_color* : (default gray)
The color of median lines
*median_width* : (default 2)
Median line width
*colors* : (default None)
Colors to use when painting a dataseries, for example
list1 = [1,2,3]
list2 = [5,6,7]
ppl.beeswarm([list1, list2], colors=["red", "blue"], xticklabels=["data1", "data2"])
@return: | Below is the the instruction that describes the task:
### Input:
Create a R-like beeswarm plot showing the mean and datapoints.
The difference from matplotlib is only the left axis line is
shown, and ticklabels labeling each category of data can be added.
@param ax:
@param x:
@param kwargs: Besides xticklabels, which is a prettyplotlib-specific
argument which will label each individual beeswarm, many arguments for
matplotlib.pyplot.boxplot will be accepted:
http://matplotlib.org/api/axes_api.html#matplotlib.axes.Axes.boxplot
Additional arguments include:
*median_color* : (default gray)
The color of median lines
*median_width* : (default 2)
Median line width
*colors* : (default None)
Colors to use when painting a dataseries, for example
list1 = [1,2,3]
list2 = [5,6,7]
ppl.beeswarm([list1, list2], colors=["red", "blue"], xticklabels=["data1", "data2"])
@return:
### Response:
def beeswarm(*args, **kwargs):
"""
Create a R-like beeswarm plot showing the mean and datapoints.
The difference from matplotlib is only the left axis line is
shown, and ticklabels labeling each category of data can be added.
@param ax:
@param x:
@param kwargs: Besides xticklabels, which is a prettyplotlib-specific
argument which will label each individual beeswarm, many arguments for
matplotlib.pyplot.boxplot will be accepted:
http://matplotlib.org/api/axes_api.html#matplotlib.axes.Axes.boxplot
Additional arguments include:
*median_color* : (default gray)
The color of median lines
*median_width* : (default 2)
Median line width
*colors* : (default None)
Colors to use when painting a dataseries, for example
list1 = [1,2,3]
list2 = [5,6,7]
ppl.beeswarm([list1, list2], colors=["red", "blue"], xticklabels=["data1", "data2"])
@return:
"""
ax, args, kwargs = maybe_get_ax(*args, **kwargs)
# If no ticklabels are specified, don't draw any
xticklabels = kwargs.pop('xticklabels', None)
colors = kwargs.pop('colors', None)
fontsize = kwargs.pop('fontsize', 10)
gray = _colors.set1[8]
red = _colors.set1[0]
blue = kwargs.pop('color', _colors.set1[1])
kwargs.setdefault('widths', 0.25)
kwargs.setdefault('sym', "o")
bp = _beeswarm(ax, *args, **kwargs)
kwargs.setdefault("median_color", gray)
kwargs.setdefault("median_linewidth", 2)
if xticklabels:
ax.xaxis.set_ticklabels(xticklabels, fontsize=fontsize)
show_caps = kwargs.pop('show_caps', True)
show_ticks = kwargs.pop('show_ticks', False)
remove_chartjunk(ax, ['top', 'right', 'bottom'], show_ticks=show_ticks)
linewidth = 0.75
plt.setp(bp['boxes'], color=blue, linewidth=linewidth)
plt.setp(bp['medians'], color=kwargs.pop("median_color"), linewidth=kwargs.pop("median_linewidth"))
#plt.setp(bp['whiskers'], color=blue, linestyle='solid',
# linewidth=linewidth)
for color, flier in zip(colors, bp['fliers']):
plt.setp(flier, color=color)
#if show_caps:
# plt.setp(bp['caps'], color=blue, linewidth=linewidth)
#else:
# plt.setp(bp['caps'], color='none')
ax.spines['left']._linewidth = 0.5
return bp |
def is_interesting(entry):
"""Is this entry interesting?
``entry`` is an XML node representing one entry of the svn status
XML output. It looks like this::
<entry path="unchanged.txt">
<wc-status item="normal" revision="1" props="none">
<commit revision="1">
<author>mg</author>
<date>2015-02-06T07:52:38.163516Z</date>
</commit>
</wc-status>
</entry>
<entry path="added-but-not-committed.txt">
<wc-status item="added" revision="-1" props="none"></wc-status>
</entry>
<entry path="ext">
<wc-status item="external" props="none"></wc-status>
</entry>
<entry path="unknown.txt">
<wc-status props="none" item="unversioned"></wc-status>
</entry>
"""
if entry.get('path') == '.':
return False
status = entry.find('wc-status')
if status is None:
warning('svn status --xml parse error: <entry path="%s"> without'
' <wc-status>' % entry.get('path'))
return False
# For SVN externals we get two entries: one mentioning the
# existence of the external, and one about the status of the external.
if status.get('item') in ('unversioned', 'external'):
return False
return True | Is this entry interesting?
``entry`` is an XML node representing one entry of the svn status
XML output. It looks like this::
<entry path="unchanged.txt">
<wc-status item="normal" revision="1" props="none">
<commit revision="1">
<author>mg</author>
<date>2015-02-06T07:52:38.163516Z</date>
</commit>
</wc-status>
</entry>
<entry path="added-but-not-committed.txt">
<wc-status item="added" revision="-1" props="none"></wc-status>
</entry>
<entry path="ext">
<wc-status item="external" props="none"></wc-status>
</entry>
<entry path="unknown.txt">
<wc-status props="none" item="unversioned"></wc-status>
</entry> | Below is the instruction that describes the task:
### Input:
Is this entry interesting?
``entry`` is an XML node representing one entry of the svn status
XML output. It looks like this::
<entry path="unchanged.txt">
<wc-status item="normal" revision="1" props="none">
<commit revision="1">
<author>mg</author>
<date>2015-02-06T07:52:38.163516Z</date>
</commit>
</wc-status>
</entry>
<entry path="added-but-not-committed.txt">
<wc-status item="added" revision="-1" props="none"></wc-status>
</entry>
<entry path="ext">
<wc-status item="external" props="none"></wc-status>
</entry>
<entry path="unknown.txt">
<wc-status props="none" item="unversioned"></wc-status>
</entry>
### Response:
def is_interesting(entry):
"""Is this entry interesting?
``entry`` is an XML node representing one entry of the svn status
XML output. It looks like this::
<entry path="unchanged.txt">
<wc-status item="normal" revision="1" props="none">
<commit revision="1">
<author>mg</author>
<date>2015-02-06T07:52:38.163516Z</date>
</commit>
</wc-status>
</entry>
<entry path="added-but-not-committed.txt">
<wc-status item="added" revision="-1" props="none"></wc-status>
</entry>
<entry path="ext">
<wc-status item="external" props="none"></wc-status>
</entry>
<entry path="unknown.txt">
<wc-status props="none" item="unversioned"></wc-status>
</entry>
"""
if entry.get('path') == '.':
return False
status = entry.find('wc-status')
if status is None:
warning('svn status --xml parse error: <entry path="%s"> without'
' <wc-status>' % entry.get('path'))
return False
# For SVN externals we get two entries: one mentioning the
# existence of the external, and one about the status of the external.
if status.get('item') in ('unversioned', 'external'):
return False
return True |
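A short usage sketch for is_interesting() (illustrative, not part of the original source): it wraps the fragments from the docstring in a single root element, parses them with the standard library, and keeps only the interesting paths.
import xml.etree.ElementTree as ET

sample = """
<target>
  <entry path="unchanged.txt">
    <wc-status item="normal" revision="1" props="none"/>
  </entry>
  <entry path="added-but-not-committed.txt">
    <wc-status item="added" revision="-1" props="none"/>
  </entry>
  <entry path="ext">
    <wc-status item="external" props="none"/>
  </entry>
  <entry path="unknown.txt">
    <wc-status props="none" item="unversioned"/>
  </entry>
</target>
"""

root = ET.fromstring(sample)
# Externals and unversioned files are filtered out, so this prints
# ['unchanged.txt', 'added-but-not-committed.txt'].
print([e.get('path') for e in root.iter('entry') if is_interesting(e)])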
def upload(self,
file,
name=None,
prefix=None,
extensions=None,
overwrite=False,
public=False,
random_name=False,
**kwargs):
"""
To upload file
:param file: FileStorage object or string location
:param name: The name of the object.
:param prefix: A prefix for the object. Can be in the form of directory tree
:param extensions: list of extensions to allow. If empty, it will use all extensions.
:param overwrite: bool - To overwrite if file exists
:param public: bool - To set acl to private or public-read. Having acl in kwargs will override it
:param random_name - If True and Name is None it will create a random name.
Otherwise it will use the file name. `name` will always take precedence
:param kwargs: extra params: ie: acl, meta_data etc.
:return: Object
"""
tmp_file = None
try:
if "acl" not in kwargs:
kwargs["acl"] = "public-read" if public else "private"
extra = kwargs
# It seems like this is a url, we'll try to download it first
if isinstance(file, string_types) and re.match(URL_REGEXP, file):
tmp_file = self._download_from_url(file)
file = tmp_file
# Create a random name
if not name and random_name:
name = uuid.uuid4().hex
# coming from a flask, or upload object
if isinstance(file, FileStorage):
extension = get_file_extension(file.filename)
if not name:
fname = get_file_name(file.filename).split("." + extension)[0]
name = slugify.slugify(fname)
else:
extension = get_file_extension(file)
if not name:
name = get_file_name(file)
if len(get_file_extension(name).strip()) == 0:
name += "." + extension
name = name.strip("/").strip()
if isinstance(self.driver, local.LocalStorageDriver):
name = secure_filename(name)
if prefix:
name = prefix.lstrip("/") + name
if not overwrite:
name = self._safe_object_name(name)
# For backwards compatibility, kwargs now holds `allowed_extensions`
allowed_extensions = extensions or kwargs.get("allowed_extensions")
if not allowed_extensions:
allowed_extensions = self.allowed_extensions
if extension.lower() not in allowed_extensions:
raise InvalidExtensionError("Invalid file extension: '.%s' " % extension)
if isinstance(file, FileStorage):
obj = self.container.upload_object_via_stream(iterator=file.stream,
object_name=name,
extra=extra)
else:
obj = self.container.upload_object(file_path=file,
object_name=name,
extra=extra)
return Object(obj=obj)
except Exception as e:
raise e
finally:
if tmp_file and os.path.isfile(tmp_file):
os.remove(tmp_file) | To upload file
:param file: FileStorage object or string location
:param name: The name of the object.
:param prefix: A prefix for the object. Can be in the form of directory tree
:param extensions: list of extensions to allow. If empty, it will use all extensions.
:param overwrite: bool - To overwrite if file exists
:param public: bool - To set acl to private or public-read. Having acl in kwargs will override it
:param random_name - If True and Name is None it will create a random name.
Otherwise it will use the file name. `name` will always take precedence
:param kwargs: extra params: ie: acl, meta_data etc.
:return: Object | Below is the instruction that describes the task:
### Input:
To upload file
:param file: FileStorage object or string location
:param name: The name of the object.
:param prefix: A prefix for the object. Can be in the form of directory tree
:param extensions: list of extensions to allow. If empty, it will use all extensions.
:param overwrite: bool - To overwrite if file exists
:param public: bool - To set acl to private or public-read. Having acl in kwargs will override it
:param random_name - If True and Name is None it will create a random name.
Otherwise it will use the file name. `name` will always take precedence
:param kwargs: extra params: ie: acl, meta_data etc.
:return: Object
### Response:
def upload(self,
file,
name=None,
prefix=None,
extensions=None,
overwrite=False,
public=False,
random_name=False,
**kwargs):
"""
To upload file
:param file: FileStorage object or string location
:param name: The name of the object.
:param prefix: A prefix for the object. Can be in the form of directory tree
:param extensions: list of extensions to allow. If empty, it will use all extensions.
:param overwrite: bool - To overwrite if file exists
:param public: bool - To set acl to private or public-read. Having acl in kwargs will override it
:param random_name - If True and Name is None it will create a random name.
Otherwise it will use the file name. `name` will always take precedence
:param kwargs: extra params: ie: acl, meta_data etc.
:return: Object
"""
tmp_file = None
try:
if "acl" not in kwargs:
kwargs["acl"] = "public-read" if public else "private"
extra = kwargs
# It seems like this is a url, we'll try to download it first
if isinstance(file, string_types) and re.match(URL_REGEXP, file):
tmp_file = self._download_from_url(file)
file = tmp_file
# Create a random name
if not name and random_name:
name = uuid.uuid4().hex
# coming from a flask, or upload object
if isinstance(file, FileStorage):
extension = get_file_extension(file.filename)
if not name:
fname = get_file_name(file.filename).split("." + extension)[0]
name = slugify.slugify(fname)
else:
extension = get_file_extension(file)
if not name:
name = get_file_name(file)
if len(get_file_extension(name).strip()) == 0:
name += "." + extension
name = name.strip("/").strip()
if isinstance(self.driver, local.LocalStorageDriver):
name = secure_filename(name)
if prefix:
name = prefix.lstrip("/") + name
if not overwrite:
name = self._safe_object_name(name)
# For backwards compatibility, kwargs now holds `allowed_extensions`
allowed_extensions = extensions or kwargs.get("allowed_extensions")
if not allowed_extensions:
allowed_extensions = self.allowed_extensions
if extension.lower() not in allowed_extensions:
raise InvalidExtensionError("Invalid file extension: '.%s' " % extension)
if isinstance(file, FileStorage):
obj = self.container.upload_object_via_stream(iterator=file.stream,
object_name=name,
extra=extra)
else:
obj = self.container.upload_object(file_path=file,
object_name=name,
extra=extra)
return Object(obj=obj)
except Exception as e:
raise e
finally:
if tmp_file and os.path.isfile(tmp_file):
os.remove(tmp_file) |
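A hedged usage sketch for upload(); the storage instance, file path, and prefix below are placeholders, not taken from the original library.
# Illustrative only: `storage` is assumed to be an instance of the class that
# defines upload(), already bound to a libcloud container.
obj = storage.upload(
    "/tmp/report.pdf",        # local path; a URL would be downloaded first
    prefix="reports/2019/",   # stored under this pseudo-directory
    extensions=["pdf"],       # anything else raises InvalidExtensionError
    public=True,              # acl defaults to "public-read" unless overridden
    overwrite=False,          # a non-clashing name is generated if needed
)
print(obj)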
def start_engine(self):
'''
Start the child processes (one per device OS)
'''
if self.disable_security is True:
log.warning('***Not starting the authenticator process due to disable_security being set to True***')
else:
log.debug('Generating the private key')
self.__priv_key = nacl.utils.random(nacl.secret.SecretBox.KEY_SIZE)
log.debug('Generating the signing key')
self.__signing_key = nacl.signing.SigningKey.generate()
# start the keepalive thread for the auth sub-process
self._processes.append(self._start_auth_proc())
log.debug('Starting the internal proxy')
proc = self._start_pub_px_proc()
self._processes.append(proc)
# publisher process start
pub_id = 0
for pub in self.publisher:
publisher_type, publisher_opts = list(pub.items())[0]
proc = self._start_pub_proc(publisher_type,
publisher_opts,
pub_id)
self._processes.append(proc)
pub_id += 1
# device process start
log.info('Starting child processes for each device type')
started_os_proc = []
for device_os, device_config in self.config_dict.items():
if not self._whitelist_blacklist(device_os):
log.debug('Not starting process for %s (whitelist-blacklist logic)', device_os)
# Ignore devices that are not in the whitelist (if defined),
# or those operating systems that are on the blacklist.
# This way we can prevent starting unwanted sub-processes.
continue
log.debug('Will start %d worker process(es) for %s', self.device_worker_processes, device_os)
for proc_index in range(self.device_worker_processes):
self._processes.append(self._start_dev_proc(device_os,
device_config))
started_os_proc.append(device_os)
# start the server process
self._processes.append(self._start_srv_proc(started_os_proc))
# start listener process
for lst in self.listener:
listener_type, listener_opts = list(lst.items())[0]
proc = self._start_lst_proc(listener_type,
listener_opts)
self._processes.append(proc)
thread = threading.Thread(target=self._check_children)
thread.start() | Start the child processes (one per device OS) | Below is the instruction that describes the task:
### Input:
Start the child processes (one per device OS)
### Response:
def start_engine(self):
'''
Start the child processes (one per device OS)
'''
if self.disable_security is True:
log.warning('***Not starting the authenticator process due to disable_security being set to True***')
else:
log.debug('Generating the private key')
self.__priv_key = nacl.utils.random(nacl.secret.SecretBox.KEY_SIZE)
log.debug('Generating the signing key')
self.__signing_key = nacl.signing.SigningKey.generate()
# start the keepalive thread for the auth sub-process
self._processes.append(self._start_auth_proc())
log.debug('Starting the internal proxy')
proc = self._start_pub_px_proc()
self._processes.append(proc)
# publisher process start
pub_id = 0
for pub in self.publisher:
publisher_type, publisher_opts = list(pub.items())[0]
proc = self._start_pub_proc(publisher_type,
publisher_opts,
pub_id)
self._processes.append(proc)
pub_id += 1
# device process start
log.info('Starting child processes for each device type')
started_os_proc = []
for device_os, device_config in self.config_dict.items():
if not self._whitelist_blacklist(device_os):
log.debug('Not starting process for %s (whitelist-blacklist logic)', device_os)
# Ignore devices that are not in the whitelist (if defined),
# or those operating systems that are on the blacklist.
# This way we can prevent starting unwanted sub-processes.
continue
log.debug('Will start %d worker process(es) for %s', self.device_worker_processes, device_os)
for proc_index in range(self.device_worker_processes):
self._processes.append(self._start_dev_proc(device_os,
device_config))
started_os_proc.append(device_os)
# start the server process
self._processes.append(self._start_srv_proc(started_os_proc))
# start listener process
for lst in self.listener:
listener_type, listener_opts = list(lst.items())[0]
proc = self._start_lst_proc(listener_type,
listener_opts)
self._processes.append(proc)
thread = threading.Thread(target=self._check_children)
thread.start() |
async def nodes(self, text, opts=None, user=None):
'''
A simple non-streaming way to return a list of nodes.
'''
return [n async for n in self.eval(text, opts=opts, user=user)] | A simple non-streaming way to return a list of nodes. | Below is the instruction that describes the task:
### Input:
A simple non-streaming way to return a list of nodes.
### Response:
async def nodes(self, text, opts=None, user=None):
'''
A simple non-streaming way to return a list of nodes.
'''
return [n async for n in self.eval(text, opts=opts, user=user)] |
def _define(self):
"""
gate ch a,b {
h b;
sdg b;
cx a,b;
h b;
t b;
cx a,b;
t b;
h b;
s b;
x b;
s a;}
"""
definition = []
q = QuantumRegister(2, "q")
rule = [
(HGate(), [q[1]], []),
(SdgGate(), [q[1]], []),
(CnotGate(), [q[0], q[1]], []),
(HGate(), [q[1]], []),
(TGate(), [q[1]], []),
(CnotGate(), [q[0], q[1]], []),
(TGate(), [q[1]], []),
(HGate(), [q[1]], []),
(SGate(), [q[1]], []),
(XGate(), [q[1]], []),
(SGate(), [q[0]], [])
]
for inst in rule:
definition.append(inst)
self.definition = definition | gate ch a,b {
h b;
sdg b;
cx a,b;
h b;
t b;
cx a,b;
t b;
h b;
s b;
x b;
s a;} | Below is the instruction that describes the task:
### Input:
gate ch a,b {
h b;
sdg b;
cx a,b;
h b;
t b;
cx a,b;
t b;
h b;
s b;
x b;
s a;}
### Response:
def _define(self):
"""
gate ch a,b {
h b;
sdg b;
cx a,b;
h b;
t b;
cx a,b;
t b;
h b;
s b;
x b;
s a;}
"""
definition = []
q = QuantumRegister(2, "q")
rule = [
(HGate(), [q[1]], []),
(SdgGate(), [q[1]], []),
(CnotGate(), [q[0], q[1]], []),
(HGate(), [q[1]], []),
(TGate(), [q[1]], []),
(CnotGate(), [q[0], q[1]], []),
(TGate(), [q[1]], []),
(HGate(), [q[1]], []),
(SGate(), [q[1]], []),
(XGate(), [q[1]], []),
(SGate(), [q[0]], [])
]
for inst in rule:
definition.append(inst)
self.definition = definition |
def _fill_parameters(self):
"""
Fill in the _parameters dict from the properties file.
Args:
None
Returns:
True
Todo:
Figure out what could go wrong and at least acknowledge the
fact that Murphy was an optimist.
"""
self._parameters = self._config.get('parameters', {})
self._fill_defaults()
for k in self._parameters.keys():
try:
if self._parameters[k].startswith(self.SSM) and self._parameters[k].endswith(']'):
parts = self._parameters[k].split(':')
tmp = parts[1].replace(']', '')
val = self._get_ssm_parameter(tmp)
if val:
self._parameters[k] = val
else:
logging.error('SSM parameter {} not found'.format(tmp))
return False
elif self._parameters[k] == self.ASK:
val = None
a1 = '__x___'
a2 = '__y___'
prompt1 = "Enter value for '{}': ".format(k)
prompt2 = "Confirm value for '{}': ".format(k)
while a1 != a2:
a1 = getpass.getpass(prompt=prompt1)
a2 = getpass.getpass(prompt=prompt2)
if a1 == a2:
val = a1
else:
print('values do not match, try again')
self._parameters[k] = val
except:
pass
return True | Fill in the _parameters dict from the properties file.
Args:
None
Returns:
True
Todo:
Figure out what could go wrong and at least acknowledge the
fact that Murphy was an optimist. | Below is the instruction that describes the task:
### Input:
Fill in the _parameters dict from the properties file.
Args:
None
Returns:
True
Todo:
Figure out what could go wrong and at least acknowledge the
fact that Murphy was an optimist.
### Response:
def _fill_parameters(self):
"""
Fill in the _parameters dict from the properties file.
Args:
None
Returns:
True
Todo:
Figure out what could go wrong and at least acknowledge the
fact that Murphy was an optimist.
"""
self._parameters = self._config.get('parameters', {})
self._fill_defaults()
for k in self._parameters.keys():
try:
if self._parameters[k].startswith(self.SSM) and self._parameters[k].endswith(']'):
parts = self._parameters[k].split(':')
tmp = parts[1].replace(']', '')
val = self._get_ssm_parameter(tmp)
if val:
self._parameters[k] = val
else:
logging.error('SSM parameter {} not found'.format(tmp))
return False
elif self._parameters[k] == self.ASK:
val = None
a1 = '__x___'
a2 = '__y___'
prompt1 = "Enter value for '{}': ".format(k)
prompt2 = "Confirm value for '{}': ".format(k)
while a1 != a2:
a1 = getpass.getpass(prompt=prompt1)
a2 = getpass.getpass(prompt=prompt2)
if a1 == a2:
val = a1
else:
print('values do not match, try again')
self._parameters[k] = val
except:
pass
return True |
def change_ref(self, r0=None, lmax=None):
"""
Return a new SHMagCoeffs class instance with a different reference r0.
Usage
-----
clm = x.change_ref([r0, lmax])
Returns
-------
clm : SHMagCoeffs class instance.
Parameters
----------
r0 : float, optional, default = self.r0
The reference radius of the spherical harmonic coefficients.
lmax : int, optional, default = self.lmax
Maximum spherical harmonic degree to output.
Description
-----------
This method returns a new class instance of the magnetic potential,
but using a different reference r0. When changing the reference
radius r0, the spherical harmonic coefficients will be upward or
downward continued under the assumption that the reference radius is
exterior to the body.
"""
if lmax is None:
lmax = self.lmax
clm = self.pad(lmax)
if r0 is not None and r0 != self.r0:
for l in _np.arange(lmax+1):
clm.coeffs[:, l, :l+1] *= (self.r0 / r0)**(l+2)
if self.errors is not None:
clm.errors[:, l, :l+1] *= (self.r0 / r0)**(l+2)
clm.r0 = r0
return clm | Return a new SHMagCoeffs class instance with a different reference r0.
Usage
-----
clm = x.change_ref([r0, lmax])
Returns
-------
clm : SHMagCoeffs class instance.
Parameters
----------
r0 : float, optional, default = self.r0
The reference radius of the spherical harmonic coefficients.
lmax : int, optional, default = self.lmax
Maximum spherical harmonic degree to output.
Description
-----------
This method returns a new class instance of the magnetic potential,
but using a different reference r0. When changing the reference
radius r0, the spherical harmonic coefficients will be upward or
downward continued under the assumption that the reference radius is
exterior to the body. | Below is the instruction that describes the task:
### Input:
Return a new SHMagCoeffs class instance with a different reference r0.
Usage
-----
clm = x.change_ref([r0, lmax])
Returns
-------
clm : SHMagCoeffs class instance.
Parameters
----------
r0 : float, optional, default = self.r0
The reference radius of the spherical harmonic coefficients.
lmax : int, optional, default = self.lmax
Maximum spherical harmonic degree to output.
Description
-----------
This method returns a new class instance of the magnetic potential,
but using a different reference r0. When changing the reference
radius r0, the spherical harmonic coefficients will be upward or
downward continued under the assumption that the reference radius is
exterior to the body.
### Response:
def change_ref(self, r0=None, lmax=None):
"""
Return a new SHMagCoeffs class instance with a different reference r0.
Usage
-----
clm = x.change_ref([r0, lmax])
Returns
-------
clm : SHMagCoeffs class instance.
Parameters
----------
r0 : float, optional, default = self.r0
The reference radius of the spherical harmonic coefficients.
lmax : int, optional, default = self.lmax
Maximum spherical harmonic degree to output.
Description
-----------
This method returns a new class instance of the magnetic potential,
but using a different reference r0. When changing the reference
radius r0, the spherical harmonic coefficients will be upward or
downward continued under the assumption that the reference radius is
exterior to the body.
"""
if lmax is None:
lmax = self.lmax
clm = self.pad(lmax)
if r0 is not None and r0 != self.r0:
for l in _np.arange(lmax+1):
clm.coeffs[:, l, :l+1] *= (self.r0 / r0)**(l+2)
if self.errors is not None:
clm.errors[:, l, :l+1] *= (self.r0 / r0)**(l+2)
clm.r0 = r0
return clm |
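The essential step in change_ref() is the (r0 / r0_new)**(l + 2) continuation factor. A plain-numpy sketch of that rescaling (illustrative values, not the SHMagCoeffs API):
import numpy as np

r0, r0_new = 6371.2e3, 6500.0e3              # metres; hypothetical radii
lmax = 3
coeffs = np.ones((2, lmax + 1, lmax + 1))    # same layout as clm.coeffs

for l in range(lmax + 1):
    # degree-l coefficients are continued from r0 to r0_new
    coeffs[:, l, :l + 1] *= (r0 / r0_new) ** (l + 2)

print(coeffs[0, :, 0])   # (r0/r0_new)**2, **3, **4, **5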
def _parse_cli_filters(filters):
"""
Parse the filters from the CLI and turn them into a filter dict for boto.
:param filters:
:return:
"""
parsed_filters = []
for filter_entry in filters:
filter_parts = re.match('^Name=(?P<name_value>[^,]+),Values=\[?(?P<key_values>[^\]]+)\]?', filter_entry)
parsed_filters.append({
'Name': filter_parts.group('name_value'),
'Values': filter_parts.group('key_values').split(',')
})
return parsed_filters | Parse the filters from the CLI and turn them into a filter dict for boto.
:param filters:
:return: | Below is the instruction that describes the task:
### Input:
Parse the filters from the CLI and turn them into a filter dict for boto.
:param filters:
:return:
### Response:
def _parse_cli_filters(filters):
"""
Parse the filters from the CLI and turn them into a filter dict for boto.
:param filters:
:return:
"""
parsed_filters = []
for filter_entry in filters:
filter_parts = re.match('^Name=(?P<name_value>[^,]+),Values=\[?(?P<key_values>[^\]]+)\]?', filter_entry)
parsed_filters.append({
'Name': filter_parts.group('name_value'),
'Values': filter_parts.group('key_values').split(',')
})
return parsed_filters |
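A quick usage sketch (hedged; the filter strings are made-up examples of the AWS-CLI-style syntax the regex expects):
filters = _parse_cli_filters([
    "Name=tag:Environment,Values=[prod,staging]",
    "Name=instance-state-name,Values=running",
])
# [{'Name': 'tag:Environment', 'Values': ['prod', 'staging']},
#  {'Name': 'instance-state-name', 'Values': ['running']}]
print(filters)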
def runCLI():
"""
The starting point for the execution of the Scrapple command line tool.
runCLI uses the docstring as the usage description for the scrapple command. \
The class for the required command is selected by a dynamic dispatch, and the \
command is executed through the execute_command() method of the command class.
"""
args = docopt(__doc__, version='0.3.0')
try:
check_arguments(args)
command_list = ['genconfig', 'run', 'generate']
select = itemgetter('genconfig', 'run', 'generate')
selectedCommand = command_list[select(args).index(True)]
cmdClass = get_command_class(selectedCommand)
obj = cmdClass(args)
obj.execute_command()
except POSSIBLE_EXCEPTIONS as e:
print('\n', e, '\n') | The starting point for the execution of the Scrapple command line tool.
runCLI uses the docstring as the usage description for the scrapple command. \
The class for the required command is selected by a dynamic dispatch, and the \
command is executed through the execute_command() method of the command class. | Below is the instruction that describes the task:
### Input:
The starting point for the execution of the Scrapple command line tool.
runCLI uses the docstring as the usage description for the scrapple command. \
The class for the required command is selected by a dynamic dispatch, and the \
command is executed through the execute_command() method of the command class.
### Response:
def runCLI():
"""
The starting point for the execution of the Scrapple command line tool.
runCLI uses the docstring as the usage description for the scrapple command. \
The class for the required command is selected by a dynamic dispatch, and the \
command is executed through the execute_command() method of the command class.
"""
args = docopt(__doc__, version='0.3.0')
try:
check_arguments(args)
command_list = ['genconfig', 'run', 'generate']
select = itemgetter('genconfig', 'run', 'generate')
selectedCommand = command_list[select(args).index(True)]
cmdClass = get_command_class(selectedCommand)
obj = cmdClass(args)
obj.execute_command()
except POSSIBLE_EXCEPTIONS as e:
print('\n', e, '\n') |
def get_page(pno, zoom = False, max_size = None, first = False):
"""Return a PNG image for a document page number.
"""
dlist = dlist_tab[pno] # get display list of page number
if not dlist: # create if not yet there
dlist_tab[pno] = doc[pno].getDisplayList()
dlist = dlist_tab[pno]
r = dlist.rect # the page rectangle
clip = r
# ensure image fits screen:
# exploit, but do not exceed width or height
zoom_0 = 1
if max_size:
zoom_0 = min(1, max_size[0] / r.width, max_size[1] / r.height)
if zoom_0 == 1:
zoom_0 = min(max_size[0] / r.width, max_size[1] / r.height)
mat_0 = fitz.Matrix(zoom_0, zoom_0)
if not zoom: # show total page
pix = dlist.getPixmap(matrix = mat_0, alpha=False)
else:
mp = r.tl + (r.br - r.tl) * 0.5 # page rect center
w2 = r.width / 2
h2 = r.height / 2
clip = r * 0.5
tl = zoom[0] # old top-left
tl.x += zoom[1] * (w2 / 2)
tl.x = max(0, tl.x)
tl.x = min(w2, tl.x)
tl.y += zoom[2] * (h2 / 2)
tl.y = max(0, tl.y)
tl.y = min(h2, tl.y)
clip = fitz.Rect(tl, tl.x + w2, tl.y + h2)
mat = mat_0 * fitz.Matrix(2, 2) # zoom matrix
pix = dlist.getPixmap(alpha=False, matrix=mat, clip=clip)
if first: # first call: tkinter still inactive
img = pix.getPNGData() # so use fitz png output
else: # else take tk photo image
pilimg = Image.frombytes("RGB", [pix.width, pix.height], pix.samples)
img = ImageTk.PhotoImage(pilimg)
return img, clip.tl | Return a PNG image for a document page number. | Below is the instruction that describes the task:
### Input:
Return a PNG image for a document page number.
### Response:
def get_page(pno, zoom = False, max_size = None, first = False):
"""Return a PNG image for a document page number.
"""
dlist = dlist_tab[pno] # get display list of page number
if not dlist: # create if not yet there
dlist_tab[pno] = doc[pno].getDisplayList()
dlist = dlist_tab[pno]
r = dlist.rect # the page rectangle
clip = r
# ensure image fits screen:
# exploit, but do not exceed width or height
zoom_0 = 1
if max_size:
zoom_0 = min(1, max_size[0] / r.width, max_size[1] / r.height)
if zoom_0 == 1:
zoom_0 = min(max_size[0] / r.width, max_size[1] / r.height)
mat_0 = fitz.Matrix(zoom_0, zoom_0)
if not zoom: # show total page
pix = dlist.getPixmap(matrix = mat_0, alpha=False)
else:
mp = r.tl + (r.br - r.tl) * 0.5 # page rect center
w2 = r.width / 2
h2 = r.height / 2
clip = r * 0.5
tl = zoom[0] # old top-left
tl.x += zoom[1] * (w2 / 2)
tl.x = max(0, tl.x)
tl.x = min(w2, tl.x)
tl.y += zoom[2] * (h2 / 2)
tl.y = max(0, tl.y)
tl.y = min(h2, tl.y)
clip = fitz.Rect(tl, tl.x + w2, tl.y + h2)
mat = mat_0 * fitz.Matrix(2, 2) # zoom matrix
pix = dlist.getPixmap(alpha=False, matrix=mat, clip=clip)
if first: # first call: tkinter still inactive
img = pix.getPNGData() # so use fitz png output
else: # else take tk photo image
pilimg = Image.frombytes("RGB", [pix.width, pix.height], pix.samples)
img = ImageTk.PhotoImage(pilimg)
return img, clip.tl |
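A hedged usage sketch: get_page() reads the module-level doc and dlist_tab globals, so the example sets those up first; the file name and window size are placeholders.
import fitz  # PyMuPDF

doc = fitz.open("example.pdf")       # placeholder document
dlist_tab = [None] * len(doc)        # display-list cache, one slot per page

# First call (tkinter not yet running): returns raw PNG bytes.
png_bytes, top_left = get_page(0, max_size=(800, 600), first=True)
with open("page0.png", "wb") as fh:
    fh.write(png_bytes)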
def get_bibtex(isbn_identifier):
"""
Get a BibTeX string for the given ISBN.
:param isbn_identifier: ISBN to fetch BibTeX entry for.
:returns: A BibTeX string or ``None`` if could not fetch it.
>>> get_bibtex('9783161484100')
'@book{9783161484100,\\n title = {Berkeley, Oakland: Albany, Emeryville, Alameda, Kensington},\\n author = {Peekaboo Maps},\\n isbn = {9783161484100},\\n year = {2009},\\n publisher = {Peek A Boo Maps}\\n}'
"""
# Try to find the BibTeX using associated DOIs
bibtex = doi.get_bibtex(to_doi(isbn_identifier))
if bibtex is None:
# In some cases, there are no DOIs for a given ISBN. In this case, try
# to fetch bibtex directly from the ISBN, using a combination of
# Google Books and worldcat.org results.
bibtex = isbnlib.registry.bibformatters['bibtex'](
isbnlib.meta(isbn_identifier, 'default'))
return bibtex | Get a BibTeX string for the given ISBN.
:param isbn_identifier: ISBN to fetch BibTeX entry for.
:returns: A BibTeX string or ``None`` if could not fetch it.
>>> get_bibtex('9783161484100')
'@book{9783161484100,\\n title = {Berkeley, Oakland: Albany, Emeryville, Alameda, Kensington},\\n author = {Peekaboo Maps},\\n isbn = {9783161484100},\\n year = {2009},\\n publisher = {Peek A Boo Maps}\\n}' | Below is the instruction that describes the task:
### Input:
Get a BibTeX string for the given ISBN.
:param isbn_identifier: ISBN to fetch BibTeX entry for.
:returns: A BibTeX string or ``None`` if could not fetch it.
>>> get_bibtex('9783161484100')
'@book{9783161484100,\\n title = {Berkeley, Oakland: Albany, Emeryville, Alameda, Kensington},\\n author = {Peekaboo Maps},\\n isbn = {9783161484100},\\n year = {2009},\\n publisher = {Peek A Boo Maps}\\n}'
### Response:
def get_bibtex(isbn_identifier):
"""
Get a BibTeX string for the given ISBN.
:param isbn_identifier: ISBN to fetch BibTeX entry for.
:returns: A BibTeX string or ``None`` if could not fetch it.
>>> get_bibtex('9783161484100')
'@book{9783161484100,\\n title = {Berkeley, Oakland: Albany, Emeryville, Alameda, Kensington},\\n author = {Peekaboo Maps},\\n isbn = {9783161484100},\\n year = {2009},\\n publisher = {Peek A Boo Maps}\\n}'
"""
# Try to find the BibTeX using associated DOIs
bibtex = doi.get_bibtex(to_doi(isbn_identifier))
if bibtex is None:
# In some cases, there are no DOIs for a given ISBN. In this case, try
# to fetch bibtex directly from the ISBN, using a combination of
# Google Books and worldcat.org results.
bibtex = isbnlib.registry.bibformatters['bibtex'](
isbnlib.meta(isbn_identifier, 'default'))
return bibtex |
def put(self, name=None, user_ids=None):
"""
:param name: str of name for the account, defaults to the created timestamp
:param user_ids: list of int of users to give access to this account defaults to current user
:return: Account dict created
"""
return self.connection.put('account', data=dict(name=name, user_ids=user_ids)) | :param name: str of name for the account, defaults to the created timestamp
:param user_ids: list of int of users to give access to this account defaults to current user
:return: Account dict created | Below is the instruction that describes the task:
### Input:
:param name: str of name for the account, defaults to the created timestamp
:param user_ids: list of int of users to give access to this account defaults to current user
:return: Account dict created
### Response:
def put(self, name=None, user_ids=None):
"""
:param name: str of name for the account, defaults to the created timestamp
:param user_ids: list of int of users to give access to this account defaults to current user
:return: Account dict created
"""
return self.connection.put('account', data=dict(name=name, user_ids=user_ids)) |
def filter_belief(stmts_in, belief_cutoff, **kwargs):
"""Filter to statements with belief above a given cutoff.
Parameters
----------
stmts_in : list[indra.statements.Statement]
A list of statements to filter.
belief_cutoff : float
Only statements with belief above the belief_cutoff will be returned.
Here 0 < belief_cutoff < 1.
save : Optional[str]
The name of a pickle file to save the results (stmts_out) into.
Returns
-------
stmts_out : list[indra.statements.Statement]
A list of filtered statements.
"""
dump_pkl = kwargs.get('save')
logger.info('Filtering %d statements to above %f belief' %
(len(stmts_in), belief_cutoff))
# The first round of filtering is in the top-level list
stmts_out = []
# Now we eliminate supports/supported-by
for stmt in stmts_in:
if stmt.belief < belief_cutoff:
continue
stmts_out.append(stmt)
supp_by = []
supp = []
for st in stmt.supports:
if st.belief >= belief_cutoff:
supp.append(st)
for st in stmt.supported_by:
if st.belief >= belief_cutoff:
supp_by.append(st)
stmt.supports = supp
stmt.supported_by = supp_by
logger.info('%d statements after filter...' % len(stmts_out))
if dump_pkl:
dump_statements(stmts_out, dump_pkl)
return stmts_out | Filter to statements with belief above a given cutoff.
Parameters
----------
stmts_in : list[indra.statements.Statement]
A list of statements to filter.
belief_cutoff : float
Only statements with belief above the belief_cutoff will be returned.
Here 0 < belief_cutoff < 1.
save : Optional[str]
The name of a pickle file to save the results (stmts_out) into.
Returns
-------
stmts_out : list[indra.statements.Statement]
A list of filtered statements. | Below is the instruction that describes the task:
### Input:
Filter to statements with belief above a given cutoff.
Parameters
----------
stmts_in : list[indra.statements.Statement]
A list of statements to filter.
belief_cutoff : float
Only statements with belief above the belief_cutoff will be returned.
Here 0 < belief_cutoff < 1.
save : Optional[str]
The name of a pickle file to save the results (stmts_out) into.
Returns
-------
stmts_out : list[indra.statements.Statement]
A list of filtered statements.
### Response:
def filter_belief(stmts_in, belief_cutoff, **kwargs):
"""Filter to statements with belief above a given cutoff.
Parameters
----------
stmts_in : list[indra.statements.Statement]
A list of statements to filter.
belief_cutoff : float
Only statements with belief above the belief_cutoff will be returned.
Here 0 < belief_cutoff < 1.
save : Optional[str]
The name of a pickle file to save the results (stmts_out) into.
Returns
-------
stmts_out : list[indra.statements.Statement]
A list of filtered statements.
"""
dump_pkl = kwargs.get('save')
logger.info('Filtering %d statements to above %f belief' %
(len(stmts_in), belief_cutoff))
# The first round of filtering is in the top-level list
stmts_out = []
# Now we eliminate supports/supported-by
for stmt in stmts_in:
if stmt.belief < belief_cutoff:
continue
stmts_out.append(stmt)
supp_by = []
supp = []
for st in stmt.supports:
if st.belief >= belief_cutoff:
supp.append(st)
for st in stmt.supported_by:
if st.belief >= belief_cutoff:
supp_by.append(st)
stmt.supports = supp
stmt.supported_by = supp_by
logger.info('%d statements after filter...' % len(stmts_out))
if dump_pkl:
dump_statements(stmts_out, dump_pkl)
return stmts_out |
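A small usage sketch (hedged: the statements and belief scores are invented, and only standard INDRA statement classes are assumed):
from indra.statements import Agent, Phosphorylation

high = Phosphorylation(Agent('MAP2K1'), Agent('MAPK1'))
high.belief = 0.95
low = Phosphorylation(Agent('MAPK1'), Agent('ELK1'))
low.belief = 0.40

kept = filter_belief([high, low], 0.8)
# Only the 0.95-belief statement survives the 0.8 cutoff.
assert len(kept) == 1 and kept[0] is high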
def update_changes_after_row_delete(self, row_num):
"""
Update self.changes so that row numbers for edited rows are still correct.
I.e., if row 4 was edited and then row 2 was deleted, row 4 becomes row 3.
This function updates self.changes to reflect that.
"""
if row_num in self.changes.copy():
self.changes.remove(row_num)
updated_rows = []
for changed_row in self.changes:
if changed_row == -1:
updated_rows.append(-1)
if changed_row > row_num:
updated_rows.append(changed_row - 1)
if changed_row < row_num:
updated_rows.append(changed_row)
self.changes = set(updated_rows) | Update self.changes so that row numbers for edited rows are still correct.
I.e., if row 4 was edited and then row 2 was deleted, row 4 becomes row 3.
This function updates self.changes to reflect that. | Below is the instruction that describes the task:
### Input:
Update self.changes so that row numbers for edited rows are still correct.
I.e., if row 4 was edited and then row 2 was deleted, row 4 becomes row 3.
This function updates self.changes to reflect that.
### Response:
def update_changes_after_row_delete(self, row_num):
"""
Update self.changes so that row numbers for edited rows are still correct.
I.e., if row 4 was edited and then row 2 was deleted, row 4 becomes row 3.
This function updates self.changes to reflect that.
"""
if row_num in self.changes.copy():
self.changes.remove(row_num)
updated_rows = []
for changed_row in self.changes:
if changed_row == -1:
updated_rows.append(-1)
if changed_row > row_num:
updated_rows.append(changed_row - 1)
if changed_row < row_num:
updated_rows.append(changed_row)
self.changes = set(updated_rows) |
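To make the renumbering concrete, here is a standalone sketch of the same bookkeeping (illustrative, operating on a plain set instead of self.changes):
def renumber_after_delete(changes, deleted_row):
    changes = set(changes)
    changes.discard(deleted_row)   # the deleted row is no longer an edited row
    updated = set()
    for row in changes:
        if row == -1:
            updated.add(-1)        # -1 is kept as a sentinel value
        elif row > deleted_row:
            updated.add(row - 1)   # rows after the deletion shift up by one
        else:
            updated.add(row)
    return updated

# Row 4 was edited, then row 2 is deleted: row 4 becomes row 3.
print(renumber_after_delete({2, 4, -1}, 2))   # {-1, 3} (set order may vary)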
def get_vmss(access_token, subscription_id, resource_group, vmss_name):
'''Get virtual machine scale set details.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
vmss_name (str): Name of the virtual machine scale set.
Returns:
HTTP response. JSON body of scale set properties.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourceGroups/', resource_group,
'/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name,
'?api-version=', COMP_API])
return do_get(endpoint, access_token) | Get virtual machine scale set details.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
vmss_name (str): Name of the virtual machine scale set.
Returns:
HTTP response. JSON body of scale set properties. | Below is the instruction that describes the task:
### Input:
Get virtual machine scale set details.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
vmss_name (str): Name of the virtual machine scale set.
Returns:
HTTP response. JSON body of scale set properties.
### Response:
def get_vmss(access_token, subscription_id, resource_group, vmss_name):
'''Get virtual machine scale set details.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
vmss_name (str): Name of the virtual machine scale set.
Returns:
HTTP response. JSON body of scale set properties.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourceGroups/', resource_group,
'/providers/Microsoft.Compute/virtualMachineScaleSets/', vmss_name,
'?api-version=', COMP_API])
return do_get(endpoint, access_token) |
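A hedged usage sketch; the subscription id, resource group, scale set name, and access token below are placeholders.
subscription_id = '00000000-0000-0000-0000-000000000000'   # placeholder
resource_group = 'my-resource-group'                       # placeholder
vmss_name = 'my-scale-set'                                 # placeholder
access_token = '<valid bearer token>'                      # placeholder

# Returns the JSON body describing the scale set.
details = get_vmss(access_token, subscription_id, resource_group, vmss_name)
print(details)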
def _init_exception_logging(self, app):
"""
Sets up exception logging unless ``APPINSIGHTS_DISABLE_EXCEPTION_LOGGING``
is set in the Flask config.
Args:
app (flask.Flask): the Flask application for which to initialize the extension.
"""
enabled = not app.config.get(CONF_DISABLE_EXCEPTION_LOGGING, False)
if not enabled:
return
exception_telemetry_client = TelemetryClient(
self._key, telemetry_channel=self._channel)
@app.errorhandler(Exception)
def exception_handler(exception):
if HTTPException and isinstance(exception, HTTPException):
return exception
try:
raise exception
except Exception:
exception_telemetry_client.track_exception()
finally:
raise exception
self._exception_telemetry_client = exception_telemetry_client | Sets up exception logging unless ``APPINSIGHTS_DISABLE_EXCEPTION_LOGGING``
is set in the Flask config.
Args:
app (flask.Flask): the Flask application for which to initialize the extension. | Below is the instruction that describes the task:
### Input:
Sets up exception logging unless ``APPINSIGHTS_DISABLE_EXCEPTION_LOGGING``
is set in the Flask config.
Args:
app (flask.Flask): the Flask application for which to initialize the extension.
### Response:
def _init_exception_logging(self, app):
"""
Sets up exception logging unless ``APPINSIGHTS_DISABLE_EXCEPTION_LOGGING``
is set in the Flask config.
Args:
app (flask.Flask): the Flask application for which to initialize the extension.
"""
enabled = not app.config.get(CONF_DISABLE_EXCEPTION_LOGGING, False)
if not enabled:
return
exception_telemetry_client = TelemetryClient(
self._key, telemetry_channel=self._channel)
@app.errorhandler(Exception)
def exception_handler(exception):
if HTTPException and isinstance(exception, HTTPException):
return exception
try:
raise exception
except Exception:
exception_telemetry_client.track_exception()
finally:
raise exception
self._exception_telemetry_client = exception_telemetry_client |
def connect(self, ctrl):
"""Connect to the device."""
if self.prompt:
self.prompt_re = self.driver.make_dynamic_prompt(self.prompt)
else:
self.prompt_re = self.driver.prompt_re
self.ctrl = ctrl
if self.protocol.connect(self.driver):
if self.protocol.authenticate(self.driver):
self.ctrl.try_read_prompt(1)
if not self.prompt:
self.prompt = self.ctrl.detect_prompt()
if self.is_target:
self.update_config_mode()
if self.mode is not None and self.mode != 'global':
self.last_error_msg = "Device is not in global mode. Disconnected."
self.chain.disconnect()
return False
self.prompt_re = self.driver.make_dynamic_prompt(self.prompt)
self.connected = True
if self.is_target is False:
if self.os_version is None:
self.update_os_version()
self.update_hostname()
else:
self._connected_to_target()
return True
else:
self.connected = False
return False | Connect to the device. | Below is the instruction that describes the task:
### Input:
Connect to the device.
### Response:
def connect(self, ctrl):
"""Connect to the device."""
if self.prompt:
self.prompt_re = self.driver.make_dynamic_prompt(self.prompt)
else:
self.prompt_re = self.driver.prompt_re
self.ctrl = ctrl
if self.protocol.connect(self.driver):
if self.protocol.authenticate(self.driver):
self.ctrl.try_read_prompt(1)
if not self.prompt:
self.prompt = self.ctrl.detect_prompt()
if self.is_target:
self.update_config_mode()
if self.mode is not None and self.mode != 'global':
self.last_error_msg = "Device is not in global mode. Disconnected."
self.chain.disconnect()
return False
self.prompt_re = self.driver.make_dynamic_prompt(self.prompt)
self.connected = True
if self.is_target is False:
if self.os_version is None:
self.update_os_version()
self.update_hostname()
else:
self._connected_to_target()
return True
else:
self.connected = False
return False |
async def stations(self):
"""Retrieve stations."""
data = await self.retrieve(API_DISTRITS)
Station = namedtuple('Station', ['latitude', 'longitude',
'idAreaAviso', 'idConselho',
'idDistrito', 'idRegiao',
'globalIdLocal', 'local'])
_stations = []
for station in data['data']:
_station = Station(
self._to_number(station['latitude']),
self._to_number(station['longitude']),
station['idAreaAviso'],
station['idConcelho'],
station['idDistrito'],
station['idRegiao'],
station['globalIdLocal']//100 * 100,
station['local'],
)
_stations.append(_station)
return _stations | Retrieve stations. | Below is the instruction that describes the task:
### Input:
Retrieve stations.
### Response:
async def stations(self):
"""Retrieve stations."""
data = await self.retrieve(API_DISTRITS)
Station = namedtuple('Station', ['latitude', 'longitude',
'idAreaAviso', 'idConselho',
'idDistrito', 'idRegiao',
'globalIdLocal', 'local'])
_stations = []
for station in data['data']:
_station = Station(
self._to_number(station['latitude']),
self._to_number(station['longitude']),
station['idAreaAviso'],
station['idConcelho'],
station['idDistrito'],
station['idRegiao'],
station['globalIdLocal']//100 * 100,
station['local'],
)
_stations.append(_station)
return _stations |
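A hedged usage sketch for the coroutine above; `api` is assumed to be an instance of the class that defines stations(), backed by an aiohttp session.
import asyncio

async def show_stations(api):
    stations = await api.stations()
    for station in stations[:3]:
        print(station.local, station.latitude, station.longitude)

# asyncio.run(show_stations(api))   # run with a concrete client instance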
def dt_comp(self, sampled_topics):
"""
Compute document-topic matrix from sampled_topics.
"""
samples = sampled_topics.shape[0]
dt = np.zeros((self.D, self.K, samples))
for s in range(samples):
dt[:, :, s] = \
samplers_lda.dt_comp(self.docid, sampled_topics[s, :],
self.N, self.K, self.D, self.alpha)
return dt | Compute document-topic matrix from sampled_topics. | Below is the instruction that describes the task:
### Input:
Compute document-topic matrix from sampled_topics.
### Response:
def dt_comp(self, sampled_topics):
"""
Compute document-topic matrix from sampled_topics.
"""
samples = sampled_topics.shape[0]
dt = np.zeros((self.D, self.K, samples))
for s in range(samples):
dt[:, :, s] = \
samplers_lda.dt_comp(self.docid, sampled_topics[s, :],
self.N, self.K, self.D, self.alpha)
return dt |
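For intuition, a plain-numpy sketch of one way a document-topic matrix can be formed from sampled token-topic assignments (illustrative only; the real computation is delegated to samplers_lda.dt_comp):
import numpy as np

docid = np.array([0, 0, 0, 1, 1])    # document index of each token
topics = np.array([2, 2, 0, 1, 1])   # sampled topic of each token
D, K, alpha = 2, 3, 0.1

dt = np.full((D, K), alpha)          # Dirichlet-smoothed counts
for d, k in zip(docid, topics):
    dt[d, k] += 1.0
dt /= dt.sum(axis=1, keepdims=True)  # normalise rows to probabilities

print(dt)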
def list_namespaced_resource_quota(self, namespace, **kwargs):
"""
list or watch objects of kind ResourceQuota
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_resource_quota(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1ResourceQuotaList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_namespaced_resource_quota_with_http_info(namespace, **kwargs)
else:
(data) = self.list_namespaced_resource_quota_with_http_info(namespace, **kwargs)
return data | list or watch objects of kind ResourceQuota
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_resource_quota(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1ResourceQuotaList
If the method is called asynchronously,
returns the request thread. | Below is the instruction that describes the task:
### Input:
list or watch objects of kind ResourceQuota
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_resource_quota(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1ResourceQuotaList
If the method is called asynchronously,
returns the request thread.
### Response:
def list_namespaced_resource_quota(self, namespace, **kwargs):
"""
list or watch objects of kind ResourceQuota
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_namespaced_resource_quota(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1ResourceQuotaList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_namespaced_resource_quota_with_http_info(namespace, **kwargs)
else:
(data) = self.list_namespaced_resource_quota_with_http_info(namespace, **kwargs)
return data |
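The limit/continue contract spelled out in the parameters above is easiest to see in a loop. A minimal pagination sketch, assuming a configured kubernetes.client.CoreV1Api instance named api; the attribute holding the continue token on the returned list metadata is assumed to be _continue, and the page size of 500 is arbitrary.
# Hypothetical pagination loop over ResourceQuota objects, 500 per page.
def iter_resource_quotas(api, namespace):
    token = None
    while True:
        kwargs = {"limit": 500}
        if token:
            kwargs["_continue"] = token
        page = api.list_namespaced_resource_quota(namespace, **kwargs)
        for item in page.items:
            yield item
        token = page.metadata._continue  # assumed attribute; empty when no more pages
        if not token:
            break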
def solve(self, max_worlds=10000, silent=False):
"""
find the best world to make people happy
"""
self.num_worlds = 0
num_unhappy = 0
for tax_rate in range(self.tax_range[0],self.tax_range[1]):
for equity in range(self.equity_range[0],self.equity_range[1]):
for tradition in range(self.tradition_range[0],self.tradition_range[1]):
self.num_worlds += 1
if self.num_worlds > max_worlds:
break
w = World(str(self.num_worlds).zfill(6), [5000, tax_rate/10, tradition/10, equity/10])
world_happiness = 0
num_unhappy = 0
for person in self.all_people:
wh = Happiness(person, w)
world_happiness += wh.rating
if wh.rating < 0:
num_unhappy += 1
if world_happiness > self.net_happiness:
self.net_happiness = world_happiness
self.unhappy_people = num_unhappy
if not silent:
                            print('found better world - ' + w.nme + ' = ' + str(world_happiness) + ' - total unhappy_people = ' + str(self.unhappy_people)) | find the best world to make people happy | Below is the instruction that describes the task:
### Input:
find the best world to make people happy
### Response:
def solve(self, max_worlds=10000, silent=False):
"""
find the best world to make people happy
"""
self.num_worlds = 0
num_unhappy = 0
for tax_rate in range(self.tax_range[0],self.tax_range[1]):
for equity in range(self.equity_range[0],self.equity_range[1]):
for tradition in range(self.tradition_range[0],self.tradition_range[1]):
self.num_worlds += 1
if self.num_worlds > max_worlds:
break
w = World(str(self.num_worlds).zfill(6), [5000, tax_rate/10, tradition/10, equity/10])
world_happiness = 0
num_unhappy = 0
for person in self.all_people:
wh = Happiness(person, w)
world_happiness += wh.rating
if wh.rating < 0:
num_unhappy += 1
if world_happiness > self.net_happiness:
self.net_happiness = world_happiness
self.unhappy_people = num_unhappy
if not silent:
print('found better world - ' + w.nme + ' = ' + str(world_happiness) + ' - total unhappy_people = ' + str(self.unhappy_people)) |
def construct_meta(need_data, env):
"""
Constructs the node-structure for the status container
:param need_data: need_info container
:return: node
"""
hide_options = env.config.needs_hide_options
if not isinstance(hide_options, list):
raise SphinxError('Config parameter needs_hide_options must be of type list')
node_meta = nodes.line_block(classes=['needs_meta'])
# need parameters
param_status = "status: "
param_tags = "tags: "
if need_data["status"] is not None and 'status' not in hide_options:
status_line = nodes.line(classes=['status'])
# node_status = nodes.line(param_status, param_status, classes=['status'])
node_status = nodes.inline(param_status, param_status, classes=['status'])
status_line.append(node_status)
status_line.append(nodes.inline(need_data["status"], need_data["status"],
classes=["needs-status", str(need_data['status'])]))
node_meta.append(status_line)
if need_data["tags"] and 'tags' not in hide_options:
tag_line = nodes.line(classes=['tags'])
# node_tags = nodes.line(param_tags, param_tags, classes=['tags'])
node_tags = nodes.inline(param_tags, param_tags, classes=['tags'])
tag_line.append(node_tags)
for tag in need_data['tags']:
# node_tags.append(nodes.inline(tag, tag, classes=["needs-tag", str(tag)]))
# node_tags.append(nodes.inline(' ', ' '))
tag_line.append(nodes.inline(tag, tag, classes=["needs-tag", str(tag)]))
tag_line.append(nodes.inline(' ', ' '))
node_meta.append(tag_line)
# Links incoming
if need_data['links_back'] and 'links_back' not in hide_options:
node_incoming_line = nodes.line(classes=['links', 'incoming'])
prefix = "links incoming: "
node_incoming_prefix = nodes.inline(prefix, prefix)
node_incoming_line.append(node_incoming_prefix)
node_incoming_links = Need_incoming(reftarget=need_data['id'])
node_incoming_links.append(nodes.inline(need_data['id'], need_data['id']))
node_incoming_line.append(node_incoming_links)
node_meta.append(node_incoming_line)
# # Links outgoing
if need_data['links'] and 'links' not in hide_options:
node_outgoing_line = nodes.line(classes=['links', 'outgoing'])
prefix = "links outgoing: "
node_outgoing_prefix = nodes.inline(prefix, prefix)
node_outgoing_line.append(node_outgoing_prefix)
node_outgoing_links = Need_outgoing(reftarget=need_data['id'])
node_outgoing_links.append(nodes.inline(need_data['id'], need_data['id']))
node_outgoing_line.append(node_outgoing_links)
node_meta.append(node_outgoing_line)
extra_options = getattr(env.config, 'needs_extra_options', {})
node_extra_options = []
for key, value in extra_options.items():
if key in hide_options:
continue
param_data = need_data[key]
if param_data is None or not param_data:
continue
param_option = '{}: '.format(key)
option_line = nodes.line(classes=['extra_option'])
option_line.append(nodes.inline(param_option, param_option, classes=['extra_option']))
option_line.append(nodes.inline(param_data, param_data,
classes=["needs-extra-option", str(key)]))
node_extra_options.append(option_line)
node_meta += node_extra_options
global_options = getattr(env.config, 'needs_global_options', {})
node_global_options = []
for key, value in global_options.items():
        # If a global option got locally overwritten, it must already be part of extra_options.
        # In this case skip the output, as this is done during extra_option handling
if key in extra_options or key in hide_options:
continue
param_data = need_data[key]
if param_data is None or not param_data:
continue
param_option = '{}: '.format(key)
global_option_line = nodes.line(classes=['global_option'])
global_option_line.append(nodes.inline(param_option, param_option, classes=['global_option']))
global_option_line.append(nodes.inline(param_data, param_data,
classes=["needs-global-option", str(key)]))
node_global_options.append(global_option_line)
node_meta += node_global_options
return node_meta | Constructs the node-structure for the status container
:param need_data: need_info container
    :return: node | Below is the instruction that describes the task:
### Input:
Constructs the node-structure for the status container
:param need_data: need_info container
:return: node
### Response:
def construct_meta(need_data, env):
"""
Constructs the node-structure for the status container
:param need_data: need_info container
:return: node
"""
hide_options = env.config.needs_hide_options
if not isinstance(hide_options, list):
raise SphinxError('Config parameter needs_hide_options must be of type list')
node_meta = nodes.line_block(classes=['needs_meta'])
# need parameters
param_status = "status: "
param_tags = "tags: "
if need_data["status"] is not None and 'status' not in hide_options:
status_line = nodes.line(classes=['status'])
# node_status = nodes.line(param_status, param_status, classes=['status'])
node_status = nodes.inline(param_status, param_status, classes=['status'])
status_line.append(node_status)
status_line.append(nodes.inline(need_data["status"], need_data["status"],
classes=["needs-status", str(need_data['status'])]))
node_meta.append(status_line)
if need_data["tags"] and 'tags' not in hide_options:
tag_line = nodes.line(classes=['tags'])
# node_tags = nodes.line(param_tags, param_tags, classes=['tags'])
node_tags = nodes.inline(param_tags, param_tags, classes=['tags'])
tag_line.append(node_tags)
for tag in need_data['tags']:
# node_tags.append(nodes.inline(tag, tag, classes=["needs-tag", str(tag)]))
# node_tags.append(nodes.inline(' ', ' '))
tag_line.append(nodes.inline(tag, tag, classes=["needs-tag", str(tag)]))
tag_line.append(nodes.inline(' ', ' '))
node_meta.append(tag_line)
# Links incoming
if need_data['links_back'] and 'links_back' not in hide_options:
node_incoming_line = nodes.line(classes=['links', 'incoming'])
prefix = "links incoming: "
node_incoming_prefix = nodes.inline(prefix, prefix)
node_incoming_line.append(node_incoming_prefix)
node_incoming_links = Need_incoming(reftarget=need_data['id'])
node_incoming_links.append(nodes.inline(need_data['id'], need_data['id']))
node_incoming_line.append(node_incoming_links)
node_meta.append(node_incoming_line)
# # Links outgoing
if need_data['links'] and 'links' not in hide_options:
node_outgoing_line = nodes.line(classes=['links', 'outgoing'])
prefix = "links outgoing: "
node_outgoing_prefix = nodes.inline(prefix, prefix)
node_outgoing_line.append(node_outgoing_prefix)
node_outgoing_links = Need_outgoing(reftarget=need_data['id'])
node_outgoing_links.append(nodes.inline(need_data['id'], need_data['id']))
node_outgoing_line.append(node_outgoing_links)
node_meta.append(node_outgoing_line)
extra_options = getattr(env.config, 'needs_extra_options', {})
node_extra_options = []
for key, value in extra_options.items():
if key in hide_options:
continue
param_data = need_data[key]
if param_data is None or not param_data:
continue
param_option = '{}: '.format(key)
option_line = nodes.line(classes=['extra_option'])
option_line.append(nodes.inline(param_option, param_option, classes=['extra_option']))
option_line.append(nodes.inline(param_data, param_data,
classes=["needs-extra-option", str(key)]))
node_extra_options.append(option_line)
node_meta += node_extra_options
global_options = getattr(env.config, 'needs_global_options', {})
node_global_options = []
for key, value in global_options.items():
        # If a global option got locally overwritten, it must already be part of extra_options.
        # In this case skip the output, as this is done during extra_option handling
if key in extra_options or key in hide_options:
continue
param_data = need_data[key]
if param_data is None or not param_data:
continue
param_option = '{}: '.format(key)
global_option_line = nodes.line(classes=['global_option'])
global_option_line.append(nodes.inline(param_option, param_option, classes=['global_option']))
global_option_line.append(nodes.inline(param_data, param_data,
classes=["needs-global-option", str(key)]))
node_global_options.append(global_option_line)
node_meta += node_global_options
return node_meta |
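A minimal sketch of the need_data mapping that construct_meta reads; the keys are taken from the lookups in the body above, the values are invented, and extra/global options are assumed to be empty in env.config.
# Hypothetical need_info payload that satisfies every key accessed above.
need_data = {
    "id": "REQ_001",
    "status": "open",
    "tags": ["core", "ui"],
    "links": ["REQ_002"],   # outgoing links
    "links_back": [],       # incoming links
}
node = construct_meta(need_data, env)  # env: the Sphinx build environment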
def parse_int_list(string):
"""
Parses a string of numbers and ranges into a list of integers. Ranges
are separated by dashes and inclusive of both the start and end number.
Example:
parse_int_list("8 9 10,11-13") == [8,9,10,11,12,13]
"""
integers = []
for comma_part in string.split(","):
for substring in comma_part.split(" "):
if len(substring) == 0:
continue
if "-" in substring:
left, right = substring.split("-")
left_val = int(left.strip())
right_val = int(right.strip())
integers.extend(range(left_val, right_val + 1))
else:
integers.append(int(substring.strip()))
return integers | Parses a string of numbers and ranges into a list of integers. Ranges
are separated by dashes and inclusive of both the start and end number.
Example:
        parse_int_list("8 9 10,11-13") == [8,9,10,11,12,13] | Below is the instruction that describes the task:
### Input:
Parses a string of numbers and ranges into a list of integers. Ranges
are separated by dashes and inclusive of both the start and end number.
Example:
parse_int_list("8 9 10,11-13") == [8,9,10,11,12,13]
### Response:
def parse_int_list(string):
"""
Parses a string of numbers and ranges into a list of integers. Ranges
are separated by dashes and inclusive of both the start and end number.
Example:
parse_int_list("8 9 10,11-13") == [8,9,10,11,12,13]
"""
integers = []
for comma_part in string.split(","):
for substring in comma_part.split(" "):
if len(substring) == 0:
continue
if "-" in substring:
left, right = substring.split("-")
left_val = int(left.strip())
right_val = int(right.strip())
integers.extend(range(left_val, right_val + 1))
else:
integers.append(int(substring.strip()))
return integers |
def to_networkx(cyjs, directed=True):
"""
Convert Cytoscape.js-style JSON object into NetworkX object.
    By default, data will be handled as a directed graph.
"""
if directed:
g = nx.MultiDiGraph()
else:
g = nx.MultiGraph()
network_data = cyjs[DATA]
if network_data is not None:
for key in network_data.keys():
g.graph[key] = network_data[key]
nodes = cyjs[ELEMENTS][NODES]
edges = cyjs[ELEMENTS][EDGES]
for node in nodes:
data = node[DATA]
g.add_node(data[ID], attr_dict=data)
for edge in edges:
data = edge[DATA]
source = data[SOURCE]
target = data[TARGET]
g.add_edge(source, target, attr_dict=data)
return g | Convert Cytoscape.js-style JSON object into NetworkX object.
    By default, data will be handled as a directed graph. | Below is the instruction that describes the task:
### Input:
Convert Cytoscape.js-style JSON object into NetworkX object.
    By default, data will be handled as a directed graph.
### Response:
def to_networkx(cyjs, directed=True):
"""
Convert Cytoscape.js-style JSON object into NetworkX object.
    By default, data will be handled as a directed graph.
"""
if directed:
g = nx.MultiDiGraph()
else:
g = nx.MultiGraph()
network_data = cyjs[DATA]
if network_data is not None:
for key in network_data.keys():
g.graph[key] = network_data[key]
nodes = cyjs[ELEMENTS][NODES]
edges = cyjs[ELEMENTS][EDGES]
for node in nodes:
data = node[DATA]
g.add_node(data[ID], attr_dict=data)
for edge in edges:
data = edge[DATA]
source = data[SOURCE]
target = data[TARGET]
g.add_edge(source, target, attr_dict=data)
return g |
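A usage sketch for the converter above, assuming the module constants are the standard Cytoscape.js keys (DATA='data', ELEMENTS='elements', NODES='nodes', EDGES='edges', ID='id', SOURCE='source', TARGET='target') and a NetworkX release whose add_node/add_edge still accept attr_dict (the 1.x API).
# Hypothetical minimal Cytoscape.js-style document: two nodes, one edge.
cyjs = {
    "data": {"name": "toy network"},
    "elements": {
        "nodes": [
            {"data": {"id": "a", "label": "A"}},
            {"data": {"id": "b", "label": "B"}},
        ],
        "edges": [
            {"data": {"id": "e1", "source": "a", "target": "b"}},
        ],
    },
}
g = to_networkx(cyjs, directed=True)
print(g.number_of_nodes(), g.number_of_edges())  # 2 1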
def copyglob(src: str, dest: str, allow_nothing: bool = False,
allow_nonfiles: bool = False) -> None:
"""
    Copies files whose filenames match the glob "src" into the directory
"dest". Raises an error if no files are copied, unless allow_nothing is
True.
Args:
src: source glob (e.g. ``/somewhere/*.txt``)
dest: destination directory
allow_nothing: don't raise an exception if no files are found
allow_nonfiles: copy things that are not files too (as judged by
:func:`os.path.isfile`).
Raises:
ValueError: if no files are found and ``allow_nothing`` is not set
"""
something = False
for filename in glob.glob(src):
if allow_nonfiles or os.path.isfile(filename):
shutil.copy(filename, dest)
something = True
if something or allow_nothing:
return
    raise ValueError("No files found matching: {}".format(src)) | Copies files whose filenames match the glob "src" into the directory
"dest". Raises an error if no files are copied, unless allow_nothing is
True.
Args:
src: source glob (e.g. ``/somewhere/*.txt``)
dest: destination directory
allow_nothing: don't raise an exception if no files are found
allow_nonfiles: copy things that are not files too (as judged by
:func:`os.path.isfile`).
Raises:
        ValueError: if no files are found and ``allow_nothing`` is not set | Below is the instruction that describes the task:
### Input:
    Copies files whose filenames match the glob "src" into the directory
"dest". Raises an error if no files are copied, unless allow_nothing is
True.
Args:
src: source glob (e.g. ``/somewhere/*.txt``)
dest: destination directory
allow_nothing: don't raise an exception if no files are found
allow_nonfiles: copy things that are not files too (as judged by
:func:`os.path.isfile`).
Raises:
ValueError: if no files are found and ``allow_nothing`` is not set
### Response:
def copyglob(src: str, dest: str, allow_nothing: bool = False,
allow_nonfiles: bool = False) -> None:
"""
    Copies files whose filenames match the glob "src" into the directory
"dest". Raises an error if no files are copied, unless allow_nothing is
True.
Args:
src: source glob (e.g. ``/somewhere/*.txt``)
dest: destination directory
allow_nothing: don't raise an exception if no files are found
allow_nonfiles: copy things that are not files too (as judged by
:func:`os.path.isfile`).
Raises:
ValueError: if no files are found and ``allow_nothing`` is not set
"""
something = False
for filename in glob.glob(src):
if allow_nonfiles or os.path.isfile(filename):
shutil.copy(filename, dest)
something = True
if something or allow_nothing:
return
raise ValueError("No files found matching: {}".format(src)) |
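A short usage sketch for copyglob; the paths are invented.
# Copy every .txt report into a backup directory, tolerating an empty match.
copyglob("/tmp/reports/*.txt", "/tmp/backup", allow_nothing=True)
# Without allow_nothing, an empty match raises:
#   ValueError: No files found matching: /tmp/reports/*.txt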
def convert_idx_to_name(self, y, lens):
"""Convert label index to name.
Args:
y (list): label index list.
lens (list): true length of y.
Returns:
y: label name list.
Examples:
>>> # assumes that id2label = {1: 'B-LOC', 2: 'I-LOC'}
>>> y = [[1, 0, 0], [1, 2, 0], [1, 1, 1]]
>>> lens = [1, 2, 3]
>>> self.convert_idx_to_name(y, lens)
[['B-LOC'], ['B-LOC', 'I-LOC'], ['B-LOC', 'B-LOC', 'B-LOC']]
"""
y = [[self.id2label[idx] for idx in row[:l]]
for row, l in zip(y, lens)]
return y | Convert label index to name.
Args:
y (list): label index list.
lens (list): true length of y.
Returns:
y: label name list.
Examples:
>>> # assumes that id2label = {1: 'B-LOC', 2: 'I-LOC'}
>>> y = [[1, 0, 0], [1, 2, 0], [1, 1, 1]]
>>> lens = [1, 2, 3]
>>> self.convert_idx_to_name(y, lens)
        [['B-LOC'], ['B-LOC', 'I-LOC'], ['B-LOC', 'B-LOC', 'B-LOC']] | Below is the instruction that describes the task:
### Input:
Convert label index to name.
Args:
y (list): label index list.
lens (list): true length of y.
Returns:
y: label name list.
Examples:
>>> # assumes that id2label = {1: 'B-LOC', 2: 'I-LOC'}
>>> y = [[1, 0, 0], [1, 2, 0], [1, 1, 1]]
>>> lens = [1, 2, 3]
>>> self.convert_idx_to_name(y, lens)
[['B-LOC'], ['B-LOC', 'I-LOC'], ['B-LOC', 'B-LOC', 'B-LOC']]
### Response:
def convert_idx_to_name(self, y, lens):
"""Convert label index to name.
Args:
y (list): label index list.
lens (list): true length of y.
Returns:
y: label name list.
Examples:
>>> # assumes that id2label = {1: 'B-LOC', 2: 'I-LOC'}
>>> y = [[1, 0, 0], [1, 2, 0], [1, 1, 1]]
>>> lens = [1, 2, 3]
>>> self.convert_idx_to_name(y, lens)
[['B-LOC'], ['B-LOC', 'I-LOC'], ['B-LOC', 'B-LOC', 'B-LOC']]
"""
y = [[self.id2label[idx] for idx in row[:l]]
for row, l in zip(y, lens)]
return y |
def get_or_create_group(groupname, gid_preset, system=False, id_dependent=True):
"""
Returns the id for the given group, and creates it first in case it does not exist.
:param groupname: Group name.
:type groupname: unicode
:param gid_preset: Group id to set if a new group is created.
:type gid_preset: int or unicode
:param system: Create a system group.
:type system: bool
:param id_dependent: If the group exists, but its id does not match `gid_preset`, an error is thrown.
:type id_dependent: bool
:return: Group id of the existing or new group.
:rtype: int
"""
gid = get_group_id(groupname)
if gid is None:
create_group(groupname, gid_preset, system)
return gid_preset
elif id_dependent and gid != gid_preset:
error("Present group id '{0}' does not match the required id of the environment '{1}'.".format(gid, gid_preset))
return gid | Returns the id for the given group, and creates it first in case it does not exist.
:param groupname: Group name.
:type groupname: unicode
:param gid_preset: Group id to set if a new group is created.
:type gid_preset: int or unicode
:param system: Create a system group.
:type system: bool
:param id_dependent: If the group exists, but its id does not match `gid_preset`, an error is thrown.
:type id_dependent: bool
:return: Group id of the existing or new group.
    :rtype: int | Below is the instruction that describes the task:
### Input:
Returns the id for the given group, and creates it first in case it does not exist.
:param groupname: Group name.
:type groupname: unicode
:param gid_preset: Group id to set if a new group is created.
:type gid_preset: int or unicode
:param system: Create a system group.
:type system: bool
:param id_dependent: If the group exists, but its id does not match `gid_preset`, an error is thrown.
:type id_dependent: bool
:return: Group id of the existing or new group.
:rtype: int
### Response:
def get_or_create_group(groupname, gid_preset, system=False, id_dependent=True):
"""
Returns the id for the given group, and creates it first in case it does not exist.
:param groupname: Group name.
:type groupname: unicode
:param gid_preset: Group id to set if a new group is created.
:type gid_preset: int or unicode
:param system: Create a system group.
:type system: bool
:param id_dependent: If the group exists, but its id does not match `gid_preset`, an error is thrown.
:type id_dependent: bool
:return: Group id of the existing or new group.
:rtype: int
"""
gid = get_group_id(groupname)
if gid is None:
create_group(groupname, gid_preset, system)
return gid_preset
elif id_dependent and gid != gid_preset:
error("Present group id '{0}' does not match the required id of the environment '{1}'.".format(gid, gid_preset))
return gid |
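A hedged usage sketch; the group name and gid are invented, and get_group_id/create_group/error are the helpers the body above already relies on.
# Ensure a system group 'webapp' with gid 2000 exists; returns 2000 either way,
# or reports an error if 'webapp' already exists under a different gid.
gid = get_or_create_group('webapp', 2000, system=True, id_dependent=True)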
def save(filename=None, family='ipv4'):
'''
Save the current in-memory rules to disk
CLI Example:
.. code-block:: bash
salt '*' nftables.save /etc/nftables
'''
if _conf() and not filename:
filename = _conf()
nft_families = ['ip', 'ip6', 'arp', 'bridge']
rules = "#! nft -f\n"
for family in nft_families:
out = get_rules(family)
if out:
rules += '\n'
rules = rules + '\n'.join(out)
rules = rules + '\n'
try:
with salt.utils.files.fopen(filename, 'wb') as _fh:
# Write out any changes
_fh.writelines(salt.utils.data.encode(rules))
except (IOError, OSError) as exc:
raise CommandExecutionError(
'Problem writing to configuration file: {0}'.format(exc)
)
return rules | Save the current in-memory rules to disk
CLI Example:
.. code-block:: bash
        salt '*' nftables.save /etc/nftables | Below is the instruction that describes the task:
### Input:
Save the current in-memory rules to disk
CLI Example:
.. code-block:: bash
salt '*' nftables.save /etc/nftables
### Response:
def save(filename=None, family='ipv4'):
'''
Save the current in-memory rules to disk
CLI Example:
.. code-block:: bash
salt '*' nftables.save /etc/nftables
'''
if _conf() and not filename:
filename = _conf()
nft_families = ['ip', 'ip6', 'arp', 'bridge']
rules = "#! nft -f\n"
for family in nft_families:
out = get_rules(family)
if out:
rules += '\n'
rules = rules + '\n'.join(out)
rules = rules + '\n'
try:
with salt.utils.files.fopen(filename, 'wb') as _fh:
# Write out any changes
_fh.writelines(salt.utils.data.encode(rules))
except (IOError, OSError) as exc:
raise CommandExecutionError(
'Problem writing to configuration file: {0}'.format(exc)
)
return rules |
def speech(self) -> str:
"""
Report summary designed to be read by a text-to-speech program
"""
if not self.data:
self.update()
return speech.metar(self.data, self.units) | Report summary designed to be read by a text-to-speech program | Below is the the instruction that describes the task:
### Input:
Report summary designed to be read by a text-to-speech program
### Response:
def speech(self) -> str:
"""
Report summary designed to be read by a text-to-speech program
"""
if not self.data:
self.update()
return speech.metar(self.data, self.units) |
def prepare_environment(params: Params):
"""
Sets random seeds for reproducible experiments. This may not work as expected
if you use this from within a python project in which you have already imported Pytorch.
    If you use the scripts/run_model.py entry point for training models with this library,
your experiments should be reasonably reproducible. If you are using this from your own
project, you will want to call this function before importing Pytorch. Complete determinism
is very difficult to achieve with libraries doing optimized linear algebra due to massively
parallel execution, which is exacerbated by using GPUs.
Parameters
----------
params: Params object or dict, required.
A ``Params`` object or dict holding the json parameters.
"""
seed = params.pop_int("random_seed", 13370)
numpy_seed = params.pop_int("numpy_seed", 1337)
torch_seed = params.pop_int("pytorch_seed", 133)
if seed is not None:
random.seed(seed)
if numpy_seed is not None:
numpy.random.seed(numpy_seed)
if torch_seed is not None:
torch.manual_seed(torch_seed)
# Seed all GPUs with the same seed if available.
if torch.cuda.is_available():
torch.cuda.manual_seed_all(torch_seed)
log_pytorch_version_info() | Sets random seeds for reproducible experiments. This may not work as expected
if you use this from within a python project in which you have already imported Pytorch.
    If you use the scripts/run_model.py entry point for training models with this library,
your experiments should be reasonably reproducible. If you are using this from your own
project, you will want to call this function before importing Pytorch. Complete determinism
is very difficult to achieve with libraries doing optimized linear algebra due to massively
parallel execution, which is exacerbated by using GPUs.
Parameters
----------
params: Params object or dict, required.
        A ``Params`` object or dict holding the json parameters. | Below is the instruction that describes the task:
### Input:
Sets random seeds for reproducible experiments. This may not work as expected
if you use this from within a python project in which you have already imported Pytorch.
    If you use the scripts/run_model.py entry point for training models with this library,
your experiments should be reasonably reproducible. If you are using this from your own
project, you will want to call this function before importing Pytorch. Complete determinism
is very difficult to achieve with libraries doing optimized linear algebra due to massively
parallel execution, which is exacerbated by using GPUs.
Parameters
----------
params: Params object or dict, required.
A ``Params`` object or dict holding the json parameters.
### Response:
def prepare_environment(params: Params):
"""
Sets random seeds for reproducible experiments. This may not work as expected
if you use this from within a python project in which you have already imported Pytorch.
    If you use the scripts/run_model.py entry point for training models with this library,
your experiments should be reasonably reproducible. If you are using this from your own
project, you will want to call this function before importing Pytorch. Complete determinism
is very difficult to achieve with libraries doing optimized linear algebra due to massively
parallel execution, which is exacerbated by using GPUs.
Parameters
----------
params: Params object or dict, required.
A ``Params`` object or dict holding the json parameters.
"""
seed = params.pop_int("random_seed", 13370)
numpy_seed = params.pop_int("numpy_seed", 1337)
torch_seed = params.pop_int("pytorch_seed", 133)
if seed is not None:
random.seed(seed)
if numpy_seed is not None:
numpy.random.seed(numpy_seed)
if torch_seed is not None:
torch.manual_seed(torch_seed)
# Seed all GPUs with the same seed if available.
if torch.cuda.is_available():
torch.cuda.manual_seed_all(torch_seed)
log_pytorch_version_info() |
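A minimal call sketch, assuming Params is the AllenNLP-style parameter container that the pop_int calls hint at; the seed values are arbitrary.
# Hypothetical usage: fix all three seeds before building a model.
# from allennlp.common.params import Params   # assumed import
params = Params({"random_seed": 42, "numpy_seed": 42, "pytorch_seed": 42})
prepare_environment(params)  # pops the seed keys and seeds random, numpy and torch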
async def get_timezone(self) -> Optional[tzinfo]:
"""
We can't exactly know the time zone of the user from what Facebook
gives (fucking morons) but we can still give something that'll work
until next DST.
"""
u = await self._get_user()
diff = float(u.get('timezone', 0)) * 3600.0
return tz.tzoffset('ITC', diff) | We can't exactly know the time zone of the user from what Facebook
gives (fucking morons) but we can still give something that'll work
    until next DST. | Below is the instruction that describes the task:
### Input:
We can't exactly know the time zone of the user from what Facebook
gives (fucking morons) but we can still give something that'll work
until next DST.
### Response:
async def get_timezone(self) -> Optional[tzinfo]:
"""
We can't exactly know the time zone of the user from what Facebook
gives (fucking morons) but we can still give something that'll work
until next DST.
"""
u = await self._get_user()
diff = float(u.get('timezone', 0)) * 3600.0
return tz.tzoffset('ITC', diff) |
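The offset arithmetic above is plain hours-to-seconds; a small sketch assuming tz here is dateutil.tz.
# A Facebook 'timezone' value of 5.5 (for example India) becomes a fixed
# 5.5 * 3600 = 19800-second offset.
from dateutil import tz
offset = tz.tzoffset('ITC', 5.5 * 3600.0)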
def get_default_user_groups(self, **kwargs): # noqa: E501
"""Get default user groups customer preferences # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_default_user_groups(async_req=True)
>>> result = thread.get()
:param async_req bool
:param User body:
:return: ResponseContainerListUserGroup
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_default_user_groups_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_default_user_groups_with_http_info(**kwargs) # noqa: E501
return data | Get default user groups customer preferences # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_default_user_groups(async_req=True)
>>> result = thread.get()
:param async_req bool
:param User body:
:return: ResponseContainerListUserGroup
If the method is called asynchronously,
        returns the request thread. | Below is the instruction that describes the task:
### Input:
Get default user groups customer preferences # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_default_user_groups(async_req=True)
>>> result = thread.get()
:param async_req bool
:param User body:
:return: ResponseContainerListUserGroup
If the method is called asynchronously,
returns the request thread.
### Response:
def get_default_user_groups(self, **kwargs): # noqa: E501
"""Get default user groups customer preferences # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_default_user_groups(async_req=True)
>>> result = thread.get()
:param async_req bool
:param User body:
:return: ResponseContainerListUserGroup
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_default_user_groups_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_default_user_groups_with_http_info(**kwargs) # noqa: E501
return data |
def word_tokenize(sentence):
"""
A generator which yields tokens based on the given sentence without deleting anything.
>>> context = "I love you. Please don't leave."
>>> list(word_tokenize(context))
['I', ' ', 'love', ' ', 'you', '.', ' ', 'Please', ' ', 'don', "'", 't', ' ', 'leave', '.']
"""
date_pattern = r'\d\d(\d\d)?[\\-]\d\d[\\-]\d\d(\d\d)?'
number_pattern = r'[\+-]?(\d+\.\d+|\d{1,3},(\d{3},)*\d{3}|\d+)'
arr_pattern = r'(?: \w\.){2,3}|(?:\A|\s)(?:\w\.){2,3}|[A-Z]\. [a-z]'
word_pattern = r'[\w]+'
non_space_pattern = r'[{}]|\w'.format(re.escape('!"#$%&()*,./:;<=>?@[\]^_-`{|}~'))
space_pattern = r'\s'
anything_pattern = r'.'
patterns = [date_pattern, number_pattern, arr_pattern, word_pattern, non_space_pattern, space_pattern, anything_pattern]
big_pattern = r'|'.join([('(' + pattern + ')') for pattern in patterns])
for match in re.finditer(big_pattern, sentence):
yield match.group(0) | A generator which yields tokens based on the given sentence without deleting anything.
>>> context = "I love you. Please don't leave."
>>> list(word_tokenize(context))
    ['I', ' ', 'love', ' ', 'you', '.', ' ', 'Please', ' ', 'don', "'", 't', ' ', 'leave', '.'] | Below is the instruction that describes the task:
### Input:
A generator which yields tokens based on the given sentence without deleting anything.
>>> context = "I love you. Please don't leave."
>>> list(word_tokenize(context))
['I', ' ', 'love', ' ', 'you', '.', ' ', 'Please', ' ', 'don', "'", 't', ' ', 'leave', '.']
### Response:
def word_tokenize(sentence):
"""
A generator which yields tokens based on the given sentence without deleting anything.
>>> context = "I love you. Please don't leave."
>>> list(word_tokenize(context))
['I', ' ', 'love', ' ', 'you', '.', ' ', 'Please', ' ', 'don', "'", 't', ' ', 'leave', '.']
"""
date_pattern = r'\d\d(\d\d)?[\\-]\d\d[\\-]\d\d(\d\d)?'
number_pattern = r'[\+-]?(\d+\.\d+|\d{1,3},(\d{3},)*\d{3}|\d+)'
arr_pattern = r'(?: \w\.){2,3}|(?:\A|\s)(?:\w\.){2,3}|[A-Z]\. [a-z]'
word_pattern = r'[\w]+'
non_space_pattern = r'[{}]|\w'.format(re.escape('!"#$%&()*,./:;<=>?@[\]^_-`{|}~'))
space_pattern = r'\s'
anything_pattern = r'.'
patterns = [date_pattern, number_pattern, arr_pattern, word_pattern, non_space_pattern, space_pattern, anything_pattern]
big_pattern = r'|'.join([('(' + pattern + ')') for pattern in patterns])
for match in re.finditer(big_pattern, sentence):
yield match.group(0) |
def update_state(self):
"""
Update state with latest info from Wink API.
"""
response = self.api_interface.get_device_state(self, type_override="button")
        return self._update_state_from_response(response) | Update state with latest info from Wink API. | Below is the instruction that describes the task:
### Input:
Update state with latest info from Wink API.
### Response:
def update_state(self):
"""
Update state with latest info from Wink API.
"""
response = self.api_interface.get_device_state(self, type_override="button")
return self._update_state_from_response(response) |
def get_score(self, terms):
"""Get score for a list of terms.
:type terms: list
:param terms: A list of terms to be analyzed.
:returns: dict
"""
assert isinstance(terms, list) or isinstance(terms, tuple)
score_li = np.asarray([self._get_score(t) for t in terms])
s_pos = np.sum(score_li[score_li > 0])
s_neg = -np.sum(score_li[score_li < 0])
s_pol = (s_pos-s_neg) * 1.0 / ((s_pos+s_neg)+self.EPSILON)
s_sub = (s_pos+s_neg) * 1.0 / (len(score_li)+self.EPSILON)
return {self.TAG_POS: s_pos,
self.TAG_NEG: s_neg,
self.TAG_POL: s_pol,
self.TAG_SUB: s_sub} | Get score for a list of terms.
:type terms: list
:param terms: A list of terms to be analyzed.
    :returns: dict | Below is the instruction that describes the task:
### Input:
Get score for a list of terms.
:type terms: list
:param terms: A list of terms to be analyzed.
:returns: dict
### Response:
def get_score(self, terms):
"""Get score for a list of terms.
:type terms: list
:param terms: A list of terms to be analyzed.
:returns: dict
"""
assert isinstance(terms, list) or isinstance(terms, tuple)
score_li = np.asarray([self._get_score(t) for t in terms])
s_pos = np.sum(score_li[score_li > 0])
s_neg = -np.sum(score_li[score_li < 0])
s_pol = (s_pos-s_neg) * 1.0 / ((s_pos+s_neg)+self.EPSILON)
s_sub = (s_pos+s_neg) * 1.0 / (len(score_li)+self.EPSILON)
return {self.TAG_POS: s_pos,
self.TAG_NEG: s_neg,
self.TAG_POL: s_pol,
self.TAG_SUB: s_sub} |
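A worked example of the arithmetic above with invented per-term scores.
# Suppose _get_score returns 0.5 for 'good', 0.8 for 'great' and -0.2 for 'dull':
#   s_pos = 0.5 + 0.8 = 1.3        s_neg = 0.2
#   polarity     = (1.3 - 0.2) / (1.3 + 0.2) = 0.733...
#   subjectivity = (1.3 + 0.2) / 3           = 0.5
result = analyzer.get_score(['good', 'great', 'dull'])  # analyzer: an instance of this class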
def find(self, item_id=None):
"Recursively find a menu item by its id (useful for event handlers)"
for it in self:
if it.id == item_id:
return it
elif isinstance(it, Menu):
found = it.find(item_id)
if found:
                    return found | Recursively find a menu item by its id (useful for event handlers) | Below is the instruction that describes the task:
### Input:
Recursively find a menu item by its id (useful for event handlers)
### Response:
def find(self, item_id=None):
"Recursively find a menu item by its id (useful for event handlers)"
for it in self:
if it.id == item_id:
return it
elif isinstance(it, Menu):
found = it.find(item_id)
if found:
return found |
def _repr_latex_(self):
"""
        This is used in the IPython notebook; it allows us to render the ODEProblem object in LaTeX.
How Cool is this?
"""
# TODO: we're mixing HTML with latex here. That is not necessarily a good idea, but works
# with IPython 1.2.0. Once IPython 2.0 is released, this needs to be changed to _ipython_display_
lines = []
lines.append(r"<h1>{0}</h1>".format(self.__class__.__name__))
lines.append("<p>Method: <code>{0!r}</code></p>".format(self.method))
lines.append("<p>Parameters: <code>{0!r}</code></p>".format(self.parameters))
lines.append("<p>Terms:</p>")
lines.append("<ul>")
lines.extend(['<li><code>{0!r}</code></li>'.format(lhs) for lhs in self.left_hand_side_descriptors])
lines.append("</ul>")
lines.append('<hr />')
lines.append(r"\begin{align*}")
for lhs, rhs in zip(self.left_hand_side_descriptors, self.right_hand_side):
lines.append(r"\dot{{{0}}} &= {1} \\".format(sympy.latex(lhs.symbol), sympy.latex(rhs)))
lines.append(r"\end{align*}")
        return "\n".join(lines) | This is used in the IPython notebook; it allows us to render the ODEProblem object in LaTeX.
        How Cool is this? | Below is the instruction that describes the task:
### Input:
        This is used in the IPython notebook; it allows us to render the ODEProblem object in LaTeX.
How Cool is this?
### Response:
def _repr_latex_(self):
"""
        This is used in the IPython notebook; it allows us to render the ODEProblem object in LaTeX.
How Cool is this?
"""
# TODO: we're mixing HTML with latex here. That is not necessarily a good idea, but works
# with IPython 1.2.0. Once IPython 2.0 is released, this needs to be changed to _ipython_display_
lines = []
lines.append(r"<h1>{0}</h1>".format(self.__class__.__name__))
lines.append("<p>Method: <code>{0!r}</code></p>".format(self.method))
lines.append("<p>Parameters: <code>{0!r}</code></p>".format(self.parameters))
lines.append("<p>Terms:</p>")
lines.append("<ul>")
lines.extend(['<li><code>{0!r}</code></li>'.format(lhs) for lhs in self.left_hand_side_descriptors])
lines.append("</ul>")
lines.append('<hr />')
lines.append(r"\begin{align*}")
for lhs, rhs in zip(self.left_hand_side_descriptors, self.right_hand_side):
lines.append(r"\dot{{{0}}} &= {1} \\".format(sympy.latex(lhs.symbol), sympy.latex(rhs)))
lines.append(r"\end{align*}")
return "\n".join(lines) |
def get_many(self, type: Type[T], query: Mapping[str, Any], context: PipelineContext = None) -> Iterable[T]:
"""Gets a query from the data source, which contains a request for multiple objects.
Args:
query: The query being requested (contains a request for multiple objects).
context: The context for the extraction (mutable).
Returns:
The requested objects.
"""
pass | Gets a query from the data source, which contains a request for multiple objects.
Args:
query: The query being requested (contains a request for multiple objects).
context: The context for the extraction (mutable).
Returns:
            The requested objects. | Below is the instruction that describes the task:
### Input:
Gets a query from the data source, which contains a request for multiple objects.
Args:
query: The query being requested (contains a request for multiple objects).
context: The context for the extraction (mutable).
Returns:
The requested objects.
### Response:
def get_many(self, type: Type[T], query: Mapping[str, Any], context: PipelineContext = None) -> Iterable[T]:
"""Gets a query from the data source, which contains a request for multiple objects.
Args:
query: The query being requested (contains a request for multiple objects).
context: The context for the extraction (mutable).
Returns:
The requested objects.
"""
pass |
def get_template_uuid(self):
"""
Retrieves the uuid of the given template name.
"""
response = requests.get(self.url + 'editor/scan/templates', headers=self.headers, verify=False)
templates = json.loads(response.text)
for template in templates['templates']:
if template['name'] == self.template_name:
                return template['uuid'] | Retrieves the uuid of the given template name. | Below is the instruction that describes the task:
### Input:
Retrieves the uuid of the given template name.
### Response:
def get_template_uuid(self):
"""
Retrieves the uuid of the given template name.
"""
response = requests.get(self.url + 'editor/scan/templates', headers=self.headers, verify=False)
templates = json.loads(response.text)
for template in templates['templates']:
if template['name'] == self.template_name:
return template['uuid'] |
def get_type_description(self, _type, suffix='', *args, **kwargs):
""" Get description of type
:param suffix:
:param str _type:
:rtype: str
"""
if not SchemaObjects.contains(_type):
return _type
schema = SchemaObjects.get(_type)
if schema.all_of:
models = ','.join(
(self.get_type_description(_type, *args, **kwargs) for _type in schema.all_of)
)
result = '{}'.format(models.split(',')[0])
for r in models.split(',')[1:]:
result += ' extended {}'.format(r)
elif schema.is_array:
result = 'array of {}'.format(
self.get_type_description(schema.item['type'], *args, **kwargs))
else:
result = ':ref:`{} <{}{}>`'.format(schema.name, schema.schema_id, suffix)
return result | Get description of type
:param suffix:
:param str _type:
    :rtype: str | Below is the instruction that describes the task:
### Input:
Get description of type
:param suffix:
:param str _type:
:rtype: str
### Response:
def get_type_description(self, _type, suffix='', *args, **kwargs):
""" Get description of type
:param suffix:
:param str _type:
:rtype: str
"""
if not SchemaObjects.contains(_type):
return _type
schema = SchemaObjects.get(_type)
if schema.all_of:
models = ','.join(
(self.get_type_description(_type, *args, **kwargs) for _type in schema.all_of)
)
result = '{}'.format(models.split(',')[0])
for r in models.split(',')[1:]:
result += ' extended {}'.format(r)
elif schema.is_array:
result = 'array of {}'.format(
self.get_type_description(schema.item['type'], *args, **kwargs))
else:
result = ':ref:`{} <{}{}>`'.format(schema.name, schema.schema_id, suffix)
return result |