code (stringlengths 75–104k) | docstring (stringlengths 1–46.9k) | text (stringlengths 164–112k) |
---|---|---|
def forward(node, analysis):
"""Perform a given analysis on all functions within an AST."""
if not isinstance(analysis, Forward):
raise TypeError('not a valid forward analysis object')
for succ in gast.walk(node):
if isinstance(succ, gast.FunctionDef):
cfg_obj = CFG.build_cfg(succ)
analysis.visit(cfg_obj.entry)
return node | Perform a given analysis on all functions within an AST. | Below is the instruction that describes the task:
### Input:
Perform a given analysis on all functions within an AST.
### Response:
def forward(node, analysis):
"""Perform a given analysis on all functions within an AST."""
if not isinstance(analysis, Forward):
raise TypeError('not a valid forward analysis object')
for succ in gast.walk(node):
if isinstance(succ, gast.FunctionDef):
cfg_obj = CFG.build_cfg(succ)
analysis.visit(cfg_obj.entry)
return node |
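The traversal above simply walks an AST and runs the analysis over the CFG of each function definition. As a minimal sketch of the walking step alone (using the standard ast module, whose interface gast mirrors; CFG and Forward are omitted here), collecting the FunctionDef nodes looks like this:

```python
import ast

source = """
def f(x):
    return x + 1

def g(y):
    return y * 2
"""

tree = ast.parse(source)
# every FunctionDef found anywhere in the module would get its own CFG and analysis pass
functions = [n for n in ast.walk(tree) if isinstance(n, ast.FunctionDef)]
print([fn.name for fn in functions])  # ['f', 'g']
```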
def _get_sr(name=None, session=None):
'''
Get XEN sr (storage repo) object reference
'''
if session is None:
session = _get_session()
srs = session.xenapi.SR.get_by_name_label(name)
if len(srs) == 1:
return srs[0]
return None | Get XEN sr (storage repo) object reference | Below is the instruction that describes the task:
### Input:
Get XEN sr (storage repo) object reference
### Response:
def _get_sr(name=None, session=None):
'''
Get XEN sr (storage repo) object reference
'''
if session is None:
session = _get_session()
srs = session.xenapi.SR.get_by_name_label(name)
if len(srs) == 1:
return srs[0]
return None |
def get_raw_fixed_block(self, unbuffered=False):
"""Get the raw "fixed block" of settings and min/max data."""
if unbuffered or not self._fixed_block:
self._fixed_block = self._read_fixed_block()
return self._fixed_block | Get the raw "fixed block" of settings and min/max data. | Below is the instruction that describes the task:
### Input:
Get the raw "fixed block" of settings and min/max data.
### Response:
def get_raw_fixed_block(self, unbuffered=False):
"""Get the raw "fixed block" of settings and min/max data."""
if unbuffered or not self._fixed_block:
self._fixed_block = self._read_fixed_block()
return self._fixed_block |
def fit(self, x0=None, distribution='lognormal', n=None, **kwargs):
'''Incomplete method to fit experimental values to a curve. It is very
hard to get good initial guesses, which are really required for this.
Differential evolution is promising. This API is likely to change in
the future.
'''
dist = {'lognormal': PSDLognormal,
'GGS': PSDGatesGaudinSchuhman,
'RR': PSDRosinRammler}[distribution]
if distribution == 'lognormal':
if x0 is None:
d_characteristic = sum([fi*di for fi, di in zip(self.fractions, self.Dis)])
s = 0.4
x0 = [d_characteristic, s]
elif distribution == 'GGS':
if x0 is None:
d_characteristic = sum([fi*di for fi, di in zip(self.fractions, self.Dis)])
m = 1.5
x0 = [d_characteristic, m]
elif distribution == 'RR':
if x0 is None:
x0 = [5E-6, 1e-2]
from scipy.optimize import minimize
return minimize(self._fit_obj_function, x0, args=(dist, n), **kwargs) | Incomplete method to fit experimental values to a curve. It is very
hard to get good initial guesses, which are really required for this.
Differential evolution is promising. This API is likely to change in
the future. | Below is the instruction that describes the task:
### Input:
Incomplete method to fit experimental values to a curve. It is very
hard to get good initial guesses, which are really required for this.
Differential evolution is promising. This API is likely to change in
the future.
### Response:
def fit(self, x0=None, distribution='lognormal', n=None, **kwargs):
'''Incomplete method to fit experimental values to a curve. It is very
hard to get good initial guesses, which are really required for this.
Differential evolution is promising. This API is likely to change in
the future.
'''
dist = {'lognormal': PSDLognormal,
'GGS': PSDGatesGaudinSchuhman,
'RR': PSDRosinRammler}[distribution]
if distribution == 'lognormal':
if x0 is None:
d_characteristic = sum([fi*di for fi, di in zip(self.fractions, self.Dis)])
s = 0.4
x0 = [d_characteristic, s]
elif distribution == 'GGS':
if x0 is None:
d_characteristic = sum([fi*di for fi, di in zip(self.fractions, self.Dis)])
m = 1.5
x0 = [d_characteristic, m]
elif distribution == 'RR':
if x0 is None:
x0 = [5E-6, 1e-2]
from scipy.optimize import minimize
return minimize(self._fit_obj_function, x0, args=(dist, n), **kwargs) |
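The docstring above notes that differential evolution looks promising for this fit. A rough sketch of what that could look like with SciPy is below; the objective, bounds and sample data are illustrative assumptions and not part of the fluids API:

```python
import numpy as np
from scipy.special import erf
from scipy.optimize import differential_evolution

ds = np.array([1e-6, 5e-6, 1e-5, 5e-5])       # hypothetical size grid
cdf_measured = np.array([0.1, 0.4, 0.8, 1.0])  # hypothetical cumulative fractions

def objective(params):
    d50, s = params
    # lognormal cumulative distribution evaluated at the measured sizes
    cdf_model = 0.5 * (1.0 + erf(np.log(ds / d50) / (s * np.sqrt(2.0))))
    return np.sum((cdf_model - cdf_measured) ** 2)

res = differential_evolution(objective, bounds=[(1e-7, 1e-3), (0.05, 3.0)])
print(res.x)  # fitted (d50, s)
```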
def save_report(
self,
name,
address=True):
"""
Save Compare report in .comp (flat file format).
:param name: filename
:type name : str
:param address: flag for address return
:type address : bool
:return: saving Status as dict {"Status":bool , "Message":str}
"""
try:
message = None
file = open(name + ".comp", "w")
report = compare_report_print(
self.sorted, self.scores, self.best_name)
file.write(report)
file.close()
if address:
message = os.path.join(os.getcwd(), name + ".comp")
return {"Status": True, "Message": message}
except Exception as e:
return {"Status": False, "Message": str(e)} | Save Compare report in .comp (flat file format).
:param name: filename
:type name : str
:param address: flag for address return
:type address : bool
:return: saving Status as dict {"Status":bool , "Message":str} | Below is the instruction that describes the task:
### Input:
Save Compare report in .comp (flat file format).
:param name: filename
:type name : str
:param address: flag for address return
:type address : bool
:return: saving Status as dict {"Status":bool , "Message":str}
### Response:
def save_report(
self,
name,
address=True):
"""
Save Compare report in .comp (flat file format).
:param name: filename
:type name : str
:param address: flag for address return
:type address : bool
:return: saving Status as dict {"Status":bool , "Message":str}
"""
try:
message = None
file = open(name + ".comp", "w")
report = compare_report_print(
self.sorted, self.scores, self.best_name)
file.write(report)
file.close()
if address:
message = os.path.join(os.getcwd(), name + ".comp")
return {"Status": True, "Message": message}
except Exception as e:
return {"Status": False, "Message": str(e)} |
def unset_iscsi_info(self):
"""Disable iSCSI boot option in UEFI boot mode.
:raises: IloCommandNotSupportedInBiosError, if the system is
in the BIOS boot mode.
"""
if(self._is_boot_mode_uefi()):
iscsi_info = {'iSCSIConnection': 'Disabled'}
self._change_iscsi_target_settings(iscsi_info)
else:
msg = 'iSCSI boot is not supported in the BIOS boot mode'
raise exception.IloCommandNotSupportedInBiosError(msg) | Disable iSCSI boot option in UEFI boot mode.
:raises: IloCommandNotSupportedInBiosError, if the system is
in the BIOS boot mode. | Below is the instruction that describes the task:
### Input:
Disable iSCSI boot option in UEFI boot mode.
:raises: IloCommandNotSupportedInBiosError, if the system is
in the BIOS boot mode.
### Response:
def unset_iscsi_info(self):
"""Disable iSCSI boot option in UEFI boot mode.
:raises: IloCommandNotSupportedInBiosError, if the system is
in the BIOS boot mode.
"""
if(self._is_boot_mode_uefi()):
iscsi_info = {'iSCSIConnection': 'Disabled'}
self._change_iscsi_target_settings(iscsi_info)
else:
msg = 'iSCSI boot is not supported in the BIOS boot mode'
raise exception.IloCommandNotSupportedInBiosError(msg) |
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'notice_id') and self.notice_id is not None:
_dict['notice_id'] = self.notice_id
if hasattr(self, 'created') and self.created is not None:
_dict['created'] = datetime_to_string(self.created)
if hasattr(self, 'document_id') and self.document_id is not None:
_dict['document_id'] = self.document_id
if hasattr(self, 'query_id') and self.query_id is not None:
_dict['query_id'] = self.query_id
if hasattr(self, 'severity') and self.severity is not None:
_dict['severity'] = self.severity
if hasattr(self, 'step') and self.step is not None:
_dict['step'] = self.step
if hasattr(self, 'description') and self.description is not None:
_dict['description'] = self.description
return _dict | Return a json dictionary representing this model. | Below is the instruction that describes the task:
### Input:
Return a json dictionary representing this model.
### Response:
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'notice_id') and self.notice_id is not None:
_dict['notice_id'] = self.notice_id
if hasattr(self, 'created') and self.created is not None:
_dict['created'] = datetime_to_string(self.created)
if hasattr(self, 'document_id') and self.document_id is not None:
_dict['document_id'] = self.document_id
if hasattr(self, 'query_id') and self.query_id is not None:
_dict['query_id'] = self.query_id
if hasattr(self, 'severity') and self.severity is not None:
_dict['severity'] = self.severity
if hasattr(self, 'step') and self.step is not None:
_dict['step'] = self.step
if hasattr(self, 'description') and self.description is not None:
_dict['description'] = self.description
return _dict |
def predict_proba(self, a, b, **kwargs):
"""Evaluate a pair using the IGCI model.
:param a: Input variable 1D
:param b: Input variable 1D
:param kwargs: {refMeasure: Scaling method (gaussian, integral or None),
estimator: method used to evaluate the pairs (entropy or integral)}
:return: Return value of the IGCI model >0 if a->b otherwise if return <0
"""
estimators = {'entropy': lambda x, y: eval_entropy(y) - eval_entropy(x), 'integral': integral_approx_estimator}
ref_measures = {'gaussian': lambda x: standard_scale.fit_transform(x.reshape((-1, 1))),
'uniform': lambda x: min_max_scale.fit_transform(x.reshape((-1, 1))), 'None': lambda x: x}
ref_measure = ref_measures[kwargs.get('refMeasure', 'gaussian')]
estimator = estimators[kwargs.get('estimator', 'entropy')]
a = ref_measure(a)
b = ref_measure(b)
return estimator(a, b) | Evaluate a pair using the IGCI model.
:param a: Input variable 1D
:param b: Input variable 1D
:param kwargs: {refMeasure: Scaling method (gaussian, integral or None),
estimator: method used to evaluate the pairs (entropy or integral)}
:return: Return value of the IGCI model >0 if a->b otherwise if return <0 | Below is the instruction that describes the task:
### Input:
Evaluate a pair using the IGCI model.
:param a: Input variable 1D
:param b: Input variable 1D
:param kwargs: {refMeasure: Scaling method (gaussian, integral or None),
estimator: method used to evaluate the pairs (entropy or integral)}
:return: Return value of the IGCI model >0 if a->b otherwise if return <0
### Response:
def predict_proba(self, a, b, **kwargs):
"""Evaluate a pair using the IGCI model.
:param a: Input variable 1D
:param b: Input variable 1D
:param kwargs: {refMeasure: Scaling method (gaussian, integral or None),
estimator: method used to evaluate the pairs (entropy or integral)}
:return: Return value of the IGCI model >0 if a->b otherwise if return <0
"""
estimators = {'entropy': lambda x, y: eval_entropy(y) - eval_entropy(x), 'integral': integral_approx_estimator}
ref_measures = {'gaussian': lambda x: standard_scale.fit_transform(x.reshape((-1, 1))),
'uniform': lambda x: min_max_scale.fit_transform(x.reshape((-1, 1))), 'None': lambda x: x}
ref_measure = ref_measures[kwargs.get('refMeasure', 'gaussian')]
estimator = estimators[kwargs.get('estimator', 'entropy')]
a = ref_measure(a)
b = ref_measure(b)
return estimator(a, b) |
def upload(self, file_path, dataset=None, public=False):
"""Use this function to upload data to Knoema dataset."""
upload_status = self.upload_file(file_path)
err_msg = 'Dataset has not been uploaded to the remote host'
if not upload_status.successful:
msg = '{}, because of the following error: {}'.format(err_msg, upload_status.error)
raise ValueError(msg)
err_msg = 'File has not been verified'
upload_ver_status = self.upload_verify(upload_status.properties.location, dataset)
if not upload_ver_status.successful:
ver_err = '\r\n'.join(upload_ver_status.errors)
msg = '{}, because of the following error(s): {}'.format(err_msg, ver_err)
raise ValueError(msg)
ds_upload = definition.DatasetUpload(upload_ver_status, upload_status, dataset, public)
ds_upload_submit_result = self.upload_submit(ds_upload)
err_msg = 'Dataset has not been saved to the database'
if ds_upload_submit_result.status == 'failed':
ver_err = '\r\n'.join(ds_upload_submit_result.errors)
msg = '{}, because of the following error(s): {}'.format(err_msg, ver_err)
raise ValueError(msg)
ds_upload_result = None
while True:
ds_upload_result = self.upload_status(ds_upload_submit_result.submit_id)
if ds_upload_result.status == 'pending' or ds_upload_result.status == 'processing':
time.sleep(5)
else:
break
if ds_upload_result.status != 'successful':
ver_err = '\r\n'.join(ds_upload_result.errors)
msg = '{}, because of the following error(s): {}'.format(err_msg, ver_err)
raise ValueError(msg)
return ds_upload_result.dataset | Use this function to upload data to Knoema dataset. | Below is the instruction that describes the task:
### Input:
Use this function to upload data to Knoema dataset.
### Response:
def upload(self, file_path, dataset=None, public=False):
"""Use this function to upload data to Knoema dataset."""
upload_status = self.upload_file(file_path)
err_msg = 'Dataset has not been uploaded to the remote host'
if not upload_status.successful:
msg = '{}, because of the following error: {}'.format(err_msg, upload_status.error)
raise ValueError(msg)
err_msg = 'File has not been verified'
upload_ver_status = self.upload_verify(upload_status.properties.location, dataset)
if not upload_ver_status.successful:
ver_err = '\r\n'.join(upload_ver_status.errors)
msg = '{}, because of the following error(s): {}'.format(err_msg, ver_err)
raise ValueError(msg)
ds_upload = definition.DatasetUpload(upload_ver_status, upload_status, dataset, public)
ds_upload_submit_result = self.upload_submit(ds_upload)
err_msg = 'Dataset has not been saved to the database'
if ds_upload_submit_result.status == 'failed':
ver_err = '\r\n'.join(ds_upload_submit_result.errors)
msg = '{}, because of the following error(s): {}'.format(err_msg, ver_err)
raise ValueError(msg)
ds_upload_result = None
while True:
ds_upload_result = self.upload_status(ds_upload_submit_result.submit_id)
if ds_upload_result.status == 'pending' or ds_upload_result.status == 'processing':
time.sleep(5)
else:
break
if ds_upload_result.status != 'successful':
ver_err = '\r\n'.join(ds_upload_result.errors)
msg = '{}, because of the following error(s): {}'.format(err_msg, ver_err)
raise ValueError(msg)
return ds_upload_result.dataset |
def do_scan_all(self, line):
"""Call ScanAllObjects. Command syntax is: scan_all"""
self.application.master.ScanAllObjects(opendnp3.GroupVariationID(2, 1), opendnp3.TaskConfig().Default()) | Call ScanAllObjects. Command syntax is: scan_all | Below is the instruction that describes the task:
### Input:
Call ScanAllObjects. Command syntax is: scan_all
### Response:
def do_scan_all(self, line):
"""Call ScanAllObjects. Command syntax is: scan_all"""
self.application.master.ScanAllObjects(opendnp3.GroupVariationID(2, 1), opendnp3.TaskConfig().Default()) |
def handle_connack(self):
"""Handle incoming CONNACK command."""
self.logger.info("CONNACK reveived")
ret, flags = self.in_packet.read_byte()
if ret != NC.ERR_SUCCESS:
self.logger.error("error read byte")
return ret
# useful for v3.1.1 only
session_present = flags & 0x01
ret, retcode = self.in_packet.read_byte()
if ret != NC.ERR_SUCCESS:
return ret
evt = event.EventConnack(retcode, session_present)
self.push_event(evt)
if retcode == NC.CONNECT_ACCEPTED:
self.state = NC.CS_CONNECTED
return NC.ERR_SUCCESS
elif retcode >= 1 and retcode <= 5:
return NC.ERR_CONN_REFUSED
else:
return NC.ERR_PROTOCOL | Handle incoming CONNACK command. | Below is the instruction that describes the task:
### Input:
Handle incoming CONNACK command.
### Response:
def handle_connack(self):
"""Handle incoming CONNACK command."""
self.logger.info("CONNACK reveived")
ret, flags = self.in_packet.read_byte()
if ret != NC.ERR_SUCCESS:
self.logger.error("error read byte")
return ret
# useful for v3.1.1 only
session_present = flags & 0x01
ret, retcode = self.in_packet.read_byte()
if ret != NC.ERR_SUCCESS:
return ret
evt = event.EventConnack(retcode, session_present)
self.push_event(evt)
if retcode == NC.CONNECT_ACCEPTED:
self.state = NC.CS_CONNECTED
return NC.ERR_SUCCESS
elif retcode >= 1 and retcode <= 5:
return NC.ERR_CONN_REFUSED
else:
return NC.ERR_PROTOCOL |
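For reference, the retcode values 0-5 checked above correspond to the CONNACK return codes defined by MQTT 3.1/3.1.1; a small lookup like the following (purely illustrative, not part of this client) spells them out:

```python
CONNACK_CODES = {
    0: "connection accepted",
    1: "refused: unacceptable protocol version",
    2: "refused: identifier rejected",
    3: "refused: server unavailable",
    4: "refused: bad user name or password",
    5: "refused: not authorized",
}

def describe_connack(retcode):
    # codes above 5 are reserved by the spec
    return CONNACK_CODES.get(retcode, "reserved/unknown return code")
```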
def get_skos(self, id=None, uri=None, match=None):
"""
get the saved skos concept with given ID or via other methods...
Note: it tries to guess what is being passed as above
"""
if not id and not uri and not match:
return None
if type(id) == type("string"):
uri = id
id = None
if not is_http(uri):
match = uri
uri = None
if match:
if type(match) != type("string"):
return []
res = []
if ":" in match: # qname
for x in self.all_skos_concepts:
if match.lower() in x.qname.lower():
res += [x]
else:
for x in self.all_skos_concepts:
if match.lower() in x.uri.lower():
res += [x]
return res
else:
for x in self.all_skos_concepts:
if id and x.id == id:
return x
if uri and x.uri.lower() == uri.lower():
return x
return None | get the saved skos concept with given ID or via other methods...
Note: it tries to guess what is being passed as above | Below is the instruction that describes the task:
### Input:
get the saved skos concept with given ID or via other methods...
Note: it tries to guess what is being passed as above
### Response:
def get_skos(self, id=None, uri=None, match=None):
"""
get the saved skos concept with given ID or via other methods...
Note: it tries to guess what is being passed as above
"""
if not id and not uri and not match:
return None
if type(id) == type("string"):
uri = id
id = None
if not is_http(uri):
match = uri
uri = None
if match:
if type(match) != type("string"):
return []
res = []
if ":" in match: # qname
for x in self.all_skos_concepts:
if match.lower() in x.qname.lower():
res += [x]
else:
for x in self.all_skos_concepts:
if match.lower() in x.uri.lower():
res += [x]
return res
else:
for x in self.all_skos_concepts:
if id and x.id == id:
return x
if uri and x.uri.lower() == uri.lower():
return x
return None |
def fix(self, value=None):
"""
Fix all instances of this variable to a value if provided or to
their current value otherwise.
Args:
value: value to be set.
"""
if value is None:
self._impl.fix()
else:
self._impl.fix(value) | Fix all instances of this variable to a value if provided or to
their current value otherwise.
Args:
value: value to be set. | Below is the instruction that describes the task:
### Input:
Fix all instances of this variable to a value if provided or to
their current value otherwise.
Args:
value: value to be set.
### Response:
def fix(self, value=None):
"""
Fix all instances of this variable to a value if provided or to
their current value otherwise.
Args:
value: value to be set.
"""
if value is None:
self._impl.fix()
else:
self._impl.fix(value) |
def __diff_dict(self,
level,
parents_ids=frozenset({}),
print_as_attribute=False,
override=False,
override_t1=None,
override_t2=None):
"""Difference of 2 dictionaries"""
if override:
# for special stuff like custom objects and named tuples we receive preprocessed t1 and t2
# but must not spoil the chain (=level) with it
t1 = override_t1
t2 = override_t2
else:
t1 = level.t1
t2 = level.t2
if print_as_attribute:
item_added_key = "attribute_added"
item_removed_key = "attribute_removed"
rel_class = AttributeRelationship
else:
item_added_key = "dictionary_item_added"
item_removed_key = "dictionary_item_removed"
rel_class = DictRelationship
t1_keys = set(t1.keys())
t2_keys = set(t2.keys())
if self.ignore_string_type_changes or self.ignore_numeric_type_changes:
t1_clean_to_keys = self.__get_clean_to_keys_mapping(keys=t1_keys, level=level)
t2_clean_to_keys = self.__get_clean_to_keys_mapping(keys=t2_keys, level=level)
t1_keys = set(t1_clean_to_keys.keys())
t2_keys = set(t2_clean_to_keys.keys())
else:
t1_clean_to_keys = t2_clean_to_keys = None
t_keys_intersect = t2_keys.intersection(t1_keys)
t_keys_added = t2_keys - t_keys_intersect
t_keys_removed = t1_keys - t_keys_intersect
for key in t_keys_added:
key = t2_clean_to_keys[key] if t2_clean_to_keys else key
change_level = level.branch_deeper(
notpresent,
t2[key],
child_relationship_class=rel_class,
child_relationship_param=key)
self.__report_result(item_added_key, change_level)
for key in t_keys_removed:
key = t1_clean_to_keys[key] if t1_clean_to_keys else key
change_level = level.branch_deeper(
t1[key],
notpresent,
child_relationship_class=rel_class,
child_relationship_param=key)
self.__report_result(item_removed_key, change_level)
for key in t_keys_intersect: # key present in both dicts - need to compare values
key1 = t1_clean_to_keys[key] if t1_clean_to_keys else key
key2 = t2_clean_to_keys[key] if t2_clean_to_keys else key
item_id = id(t1[key1])
if parents_ids and item_id in parents_ids:
continue
parents_ids_added = add_to_frozen_set(parents_ids, item_id)
# Go one level deeper
next_level = level.branch_deeper(
t1[key1],
t2[key2],
child_relationship_class=rel_class,
child_relationship_param=key)
self.__diff(next_level, parents_ids_added) | Difference of 2 dictionaries | Below is the instruction that describes the task:
### Input:
Difference of 2 dictionaries
### Response:
def __diff_dict(self,
level,
parents_ids=frozenset({}),
print_as_attribute=False,
override=False,
override_t1=None,
override_t2=None):
"""Difference of 2 dictionaries"""
if override:
# for special stuff like custom objects and named tuples we receive preprocessed t1 and t2
# but must not spoil the chain (=level) with it
t1 = override_t1
t2 = override_t2
else:
t1 = level.t1
t2 = level.t2
if print_as_attribute:
item_added_key = "attribute_added"
item_removed_key = "attribute_removed"
rel_class = AttributeRelationship
else:
item_added_key = "dictionary_item_added"
item_removed_key = "dictionary_item_removed"
rel_class = DictRelationship
t1_keys = set(t1.keys())
t2_keys = set(t2.keys())
if self.ignore_string_type_changes or self.ignore_numeric_type_changes:
t1_clean_to_keys = self.__get_clean_to_keys_mapping(keys=t1_keys, level=level)
t2_clean_to_keys = self.__get_clean_to_keys_mapping(keys=t2_keys, level=level)
t1_keys = set(t1_clean_to_keys.keys())
t2_keys = set(t2_clean_to_keys.keys())
else:
t1_clean_to_keys = t2_clean_to_keys = None
t_keys_intersect = t2_keys.intersection(t1_keys)
t_keys_added = t2_keys - t_keys_intersect
t_keys_removed = t1_keys - t_keys_intersect
for key in t_keys_added:
key = t2_clean_to_keys[key] if t2_clean_to_keys else key
change_level = level.branch_deeper(
notpresent,
t2[key],
child_relationship_class=rel_class,
child_relationship_param=key)
self.__report_result(item_added_key, change_level)
for key in t_keys_removed:
key = t1_clean_to_keys[key] if t1_clean_to_keys else key
change_level = level.branch_deeper(
t1[key],
notpresent,
child_relationship_class=rel_class,
child_relationship_param=key)
self.__report_result(item_removed_key, change_level)
for key in t_keys_intersect: # key present in both dicts - need to compare values
key1 = t1_clean_to_keys[key] if t1_clean_to_keys else key
key2 = t2_clean_to_keys[key] if t2_clean_to_keys else key
item_id = id(t1[key1])
if parents_ids and item_id in parents_ids:
continue
parents_ids_added = add_to_frozen_set(parents_ids, item_id)
# Go one level deeper
next_level = level.branch_deeper(
t1[key1],
t2[key2],
child_relationship_class=rel_class,
child_relationship_param=key)
self.__diff(next_level, parents_ids_added) |
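This method is internal to DeepDiff; from the outside, the keys it reports ('dictionary_item_added', 'dictionary_item_removed', plus 'values_changed' for shared keys) surface in the result of a plain DeepDiff call, roughly as in this sketch (exact result formatting can vary between versions):

```python
from deepdiff import DeepDiff

t1 = {"a": 1, "b": 2}
t2 = {"a": 5, "c": 3}

diff = DeepDiff(t1, t2)
# expected keys: 'values_changed' for 'a', 'dictionary_item_added' for root['c'],
# and 'dictionary_item_removed' for root['b']
print(diff)
```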
def _init_filename(self, filename=None, ext=None):
"""Initialize the current filename :attr:`FileUtils.real_filename` of the object.
Bit of a hack.
- The first invocation must have ``filename != None``; this will set a
default filename with suffix :attr:`FileUtils.default_extension`
unless another one was supplied.
- Subsequent invocations either change the filename accordingly or
ensure that the default filename is set with the proper suffix.
"""
extension = ext or self.default_extension
filename = self.filename(filename, ext=extension, use_my_ext=True, set_default=True)
#: Current full path of the object for reading and writing I/O.
self.real_filename = os.path.realpath(filename) | Initialize the current filename :attr:`FileUtils.real_filename` of the object.
Bit of a hack.
- The first invocation must have ``filename != None``; this will set a
default filename with suffix :attr:`FileUtils.default_extension`
unless another one was supplied.
- Subsequent invocations either change the filename accordingly or
ensure that the default filename is set with the proper suffix. | Below is the instruction that describes the task:
### Input:
Initialize the current filename :attr:`FileUtils.real_filename` of the object.
Bit of a hack.
- The first invocation must have ``filename != None``; this will set a
default filename with suffix :attr:`FileUtils.default_extension`
unless another one was supplied.
- Subsequent invocations either change the filename accordingly or
ensure that the default filename is set with the proper suffix.
### Response:
def _init_filename(self, filename=None, ext=None):
"""Initialize the current filename :attr:`FileUtils.real_filename` of the object.
Bit of a hack.
- The first invocation must have ``filename != None``; this will set a
default filename with suffix :attr:`FileUtils.default_extension`
unless another one was supplied.
- Subsequent invocations either change the filename accordingly or
ensure that the default filename is set with the proper suffix.
"""
extension = ext or self.default_extension
filename = self.filename(filename, ext=extension, use_my_ext=True, set_default=True)
#: Current full path of the object for reading and writing I/O.
self.real_filename = os.path.realpath(filename) |
def _create_user_posts_table(self):
"""
Creates the table to store association info between user and blog
posts.
:return:
"""
with self._engine.begin() as conn:
user_posts_table_name = self._table_name("user_posts")
if not conn.dialect.has_table(conn, user_posts_table_name):
post_id_key = self._table_name("post") + ".id"
self._user_posts_table = sqla.Table(
user_posts_table_name, self._metadata,
sqla.Column("user_id", sqla.String(128), index=True),
sqla.Column("post_id", sqla.Integer,
sqla.ForeignKey(post_id_key,
onupdate="CASCADE",
ondelete="CASCADE"),
index=True),
sqla.UniqueConstraint('user_id', 'post_id', name='uix_2'),
info=self._info
)
self._logger.debug("Created table with table name %s" %
user_posts_table_name)
else:
self._user_posts_table = \
self._metadata.tables[user_posts_table_name]
self._logger.debug("Reflecting to table with table name %s" %
user_posts_table_name) | Creates the table to store association info between user and blog
posts.
:return: | Below is the instruction that describes the task:
### Input:
Creates the table to store association info between user and blog
posts.
:return:
### Response:
def _create_user_posts_table(self):
"""
Creates the table to store association info between user and blog
posts.
:return:
"""
with self._engine.begin() as conn:
user_posts_table_name = self._table_name("user_posts")
if not conn.dialect.has_table(conn, user_posts_table_name):
post_id_key = self._table_name("post") + ".id"
self._user_posts_table = sqla.Table(
user_posts_table_name, self._metadata,
sqla.Column("user_id", sqla.String(128), index=True),
sqla.Column("post_id", sqla.Integer,
sqla.ForeignKey(post_id_key,
onupdate="CASCADE",
ondelete="CASCADE"),
index=True),
sqla.UniqueConstraint('user_id', 'post_id', name='uix_2'),
info=self._info
)
self._logger.debug("Created table with table name %s" %
user_posts_table_name)
else:
self._user_posts_table = \
self._metadata.tables[user_posts_table_name]
self._logger.debug("Reflecting to table with table name %s" %
user_posts_table_name) |
def signrawtransaction(self, rawtxhash, parent_tx_outputs=None, private_key=None):
"""signrawtransaction returns status and rawtxhash
: rawtxhash - serialized transaction (hex)
: parent_tx_outputs - outputs being spent by this transaction
: private_key - a private key to sign this transaction with
"""
if not parent_tx_outputs and not private_key:
return self.req("signrawtransaction", [rawtxhash])
else:
return self.req(
"signrawtransaction", [rawtxhash, parent_tx_outputs, private_key]
) | signrawtransaction returns status and rawtxhash
: rawtxhash - serialized transaction (hex)
: parent_tx_outputs - outputs being spent by this transaction
: private_key - a private key to sign this transaction with | Below is the instruction that describes the task:
### Input:
signrawtransaction returns status and rawtxhash
: rawtxhash - serialized transaction (hex)
: parent_tx_outputs - outputs being spent by this transaction
: private_key - a private key to sign this transaction with
### Response:
def signrawtransaction(self, rawtxhash, parent_tx_outputs=None, private_key=None):
"""signrawtransaction returns status and rawtxhash
: rawtxhash - serialized transaction (hex)
: parent_tx_outputs - outputs being spent by this transaction
: private_key - a private key to sign this transaction with
"""
if not parent_tx_outputs and not private_key:
return self.req("signrawtransaction", [rawtxhash])
else:
return self.req(
"signrawtransaction", [rawtxhash, parent_tx_outputs, private_key]
) |
def metadata_matches(self, query={}):
"""
Returns key matches to metadata
This will check every key in query for a matching key in metadata
returning true if every key is in metadata. query without keys
return false.
Args:
query(object): metadata for matching
Returns:
bool:
True: when key count in query is > 0 and all keys in query in
self.metadata
False: if key count in query is <= 0 or any key in query not
found in self.metadata
"""
result = len(query.keys()) > 0
for key in query.keys():
result = result and query[key] == self.metadata.get(key)
return result | Returns key matches to metadata
This will check every key in query for a matching key in metadata
returning true if every key is in metadata. query without keys
return false.
Args:
query(object): metadata for matching
Returns:
bool:
True: when key count in query is > 0 and all keys in query in
self.metadata
False: if key count in query is <= 0 or any key in query not
found in self.metadata | Below is the instruction that describes the task:
### Input:
Returns key matches to metadata
This will check every key in query for a matching key in metadata
returning true if every key is in metadata. query without keys
return false.
Args:
query(object): metadata for matching
Returns:
bool:
True: when key count in query is > 0 and all keys in query in
self.metadata
False: if key count in query is <= 0 or any key in query not
found in self.metadata
### Response:
def metadata_matches(self, query={}):
"""
Returns key matches to metadata
This will check every key in query for a matching key in metadata
returning true if every key is in metadata. query without keys
return false.
Args:
query(object): metadata for matching
Returns:
bool:
True: when key count in query is > 0 and all keys in query in
self.metadata
False: if key count in query is <= 0 or any key in query not
found in self.metadata
"""
result = len(query.keys()) > 0
for key in query.keys():
result = result and query[key] == self.metadata.get(key)
return result |
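The matching rule above is easy to restate as a standalone helper, which also makes the empty-query behaviour explicit; this re-statement is illustrative only and is not part of the original class:

```python
def metadata_matches(metadata, query):
    # an empty query never matches; otherwise every query key must equal the stored value
    return bool(query) and all(metadata.get(k) == v for k, v in query.items())

print(metadata_matches({"lang": "en", "tag": "x"}, {"lang": "en"}))  # True
print(metadata_matches({"lang": "en"}, {}))                          # False
print(metadata_matches({"lang": "en"}, {"lang": "de"}))              # False
```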
def get_dag_params(self) -> Dict[str, Any]:
"""
Merges default config with dag config, sets dag_id, and extrapolates dag_start_date
:returns: dict of dag parameters
"""
try:
dag_params: Dict[str, Any] = utils.merge_configs(self.dag_config, self.default_config)
except Exception as e:
raise Exception(f"Failed to merge config with default config, err: {e}")
dag_params["dag_id"]: str = self.dag_name
try:
# ensure that default_args dictionary contains key "start_date" with "datetime" value in specified timezone
dag_params["default_args"]["start_date"]: datetime = utils.get_start_date(
date_value=dag_params["default_args"]["start_date"],
timezone=dag_params["default_args"].get("timezone", "UTC"),
)
except KeyError as e:
raise Exception(f"{self.dag_name} config is missing start_date, err: {e}")
return dag_params | Merges default config with dag config, sets dag_id, and extrapolates dag_start_date
:returns: dict of dag parameters | Below is the instruction that describes the task:
### Input:
Merges default config with dag config, sets dag_id, and extrapolates dag_start_date
:returns: dict of dag parameters
### Response:
def get_dag_params(self) -> Dict[str, Any]:
"""
Merges default config with dag config, sets dag_id, and extrapolates dag_start_date
:returns: dict of dag parameters
"""
try:
dag_params: Dict[str, Any] = utils.merge_configs(self.dag_config, self.default_config)
except Exception as e:
raise Exception(f"Failed to merge config with default config, err: {e}")
dag_params["dag_id"]: str = self.dag_name
try:
# ensure that default_args dictionary contains key "start_date" with "datetime" value in specified timezone
dag_params["default_args"]["start_date"]: datetime = utils.get_start_date(
date_value=dag_params["default_args"]["start_date"],
timezone=dag_params["default_args"].get("timezone", "UTC"),
)
except KeyError as e:
raise Exception(f"{self.dag_name} config is missing start_date, err: {e}")
return dag_params |
def delete_file(self, fname, multiple, yes_to_all):
"""Delete file"""
if multiple:
buttons = QMessageBox.Yes|QMessageBox.YesToAll| \
QMessageBox.No|QMessageBox.Cancel
else:
buttons = QMessageBox.Yes|QMessageBox.No
if yes_to_all is None:
answer = QMessageBox.warning(self, _("Delete"),
_("Do you really want "
"to delete <b>%s</b>?"
) % osp.basename(fname), buttons)
if answer == QMessageBox.No:
return yes_to_all
elif answer == QMessageBox.Cancel:
return False
elif answer == QMessageBox.YesToAll:
yes_to_all = True
try:
if osp.isfile(fname):
misc.remove_file(fname)
self.sig_removed.emit(fname)
else:
self.remove_tree(fname)
self.sig_removed_tree.emit(fname)
return yes_to_all
except EnvironmentError as error:
action_str = _('delete')
QMessageBox.critical(self, _("Project Explorer"),
_("<b>Unable to %s <i>%s</i></b>"
"<br><br>Error message:<br>%s"
) % (action_str, fname, to_text_string(error)))
return False | Delete file | Below is the instruction that describes the task:
### Input:
Delete file
### Response:
def delete_file(self, fname, multiple, yes_to_all):
"""Delete file"""
if multiple:
buttons = QMessageBox.Yes|QMessageBox.YesToAll| \
QMessageBox.No|QMessageBox.Cancel
else:
buttons = QMessageBox.Yes|QMessageBox.No
if yes_to_all is None:
answer = QMessageBox.warning(self, _("Delete"),
_("Do you really want "
"to delete <b>%s</b>?"
) % osp.basename(fname), buttons)
if answer == QMessageBox.No:
return yes_to_all
elif answer == QMessageBox.Cancel:
return False
elif answer == QMessageBox.YesToAll:
yes_to_all = True
try:
if osp.isfile(fname):
misc.remove_file(fname)
self.sig_removed.emit(fname)
else:
self.remove_tree(fname)
self.sig_removed_tree.emit(fname)
return yes_to_all
except EnvironmentError as error:
action_str = _('delete')
QMessageBox.critical(self, _("Project Explorer"),
_("<b>Unable to %s <i>%s</i></b>"
"<br><br>Error message:<br>%s"
) % (action_str, fname, to_text_string(error)))
return False |
def process_line(self, record):
"Process a single record. This assumes only a single sample output."
cleaned = []
for key in self.vcf_fields:
out = self.process_column(key, getattr(record, key))
if isinstance(out, (list, tuple)):
cleaned.extend(out)
else:
cleaned.append(out)
for key in self.info_fields:
out = self.process_column(key, record.INFO.get(key, None))
if isinstance(out, (list, tuple)):
cleaned.extend(out)
else:
cleaned.append(out)
return cleaned | Process a single record. This assumes only a single sample output. | Below is the instruction that describes the task:
### Input:
Process a single record. This assumes only a single sample output.
### Response:
def process_line(self, record):
"Process a single record. This assumes only a single sample output."
cleaned = []
for key in self.vcf_fields:
out = self.process_column(key, getattr(record, key))
if isinstance(out, (list, tuple)):
cleaned.extend(out)
else:
cleaned.append(out)
for key in self.info_fields:
out = self.process_column(key, record.INFO.get(key, None))
if isinstance(out, (list, tuple)):
cleaned.extend(out)
else:
cleaned.append(out)
return cleaned |
async def available_ssids() -> List[Dict[str, Any]]:
""" List the visible (broadcasting SSID) wireless networks.
Returns a list of the SSIDs. They may contain spaces and should be escaped
if later passed to a shell.
"""
fields = ['ssid', 'signal', 'active', 'security']
cmd = ['--terse',
'--fields',
','.join(fields),
'device',
'wifi',
'list']
out, err = await _call(cmd)
if err:
raise RuntimeError(err)
output = _dict_from_terse_tabular(
fields, out,
transformers={'signal': lambda s: int(s) if s.isdigit() else None,
'active': lambda a: a.lower() == 'yes',
'ssid': lambda s: s if s != '--' else None})
return [_add_security_type_to_scan(nw) for nw in output if nw['ssid']] | List the visible (broadcasting SSID) wireless networks.
Returns a list of the SSIDs. They may contain spaces and should be escaped
if later passed to a shell. | Below is the instruction that describes the task:
### Input:
List the visible (broadcasting SSID) wireless networks.
Returns a list of the SSIDs. They may contain spaces and should be escaped
if later passed to a shell.
### Response:
async def available_ssids() -> List[Dict[str, Any]]:
""" List the visible (broadcasting SSID) wireless networks.
Returns a list of the SSIDs. They may contain spaces and should be escaped
if later passed to a shell.
"""
fields = ['ssid', 'signal', 'active', 'security']
cmd = ['--terse',
'--fields',
','.join(fields),
'device',
'wifi',
'list']
out, err = await _call(cmd)
if err:
raise RuntimeError(err)
output = _dict_from_terse_tabular(
fields, out,
transformers={'signal': lambda s: int(s) if s.isdigit() else None,
'active': lambda a: a.lower() == 'yes',
'ssid': lambda s: s if s != '--' else None})
return [_add_security_type_to_scan(nw) for nw in output if nw['ssid']] |
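Since available_ssids is a coroutine, it has to be driven by an event loop; a hypothetical caller (assuming the module above is importable and nmcli is installed) might look like:

```python
import asyncio

async def main():
    networks = await available_ssids()
    for nw in networks:
        print(nw["ssid"], nw["signal"], nw["security"])

asyncio.run(main())
```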
def scopes(self, **kwargs):
"""Scopes associated to the team."""
return self._client.scopes(team=self.id, **kwargs) | Scopes associated to the team. | Below is the instruction that describes the task:
### Input:
Scopes associated to the team.
### Response:
def scopes(self, **kwargs):
"""Scopes associated to the team."""
return self._client.scopes(team=self.id, **kwargs) |
def head(self, n=None, **kwargs):
"""
Return the first n rows. Execute at once.
:param n:
:return: result frame
:rtype: :class:`odps.df.backends.frame.ResultFrame`
"""
if n is None:
n = options.display.max_rows
return self._handle_delay_call('execute', self, head=n, **kwargs) | Return the first n rows. Execute at once.
:param n:
:return: result frame
:rtype: :class:`odps.df.backends.frame.ResultFrame` | Below is the instruction that describes the task:
### Input:
Return the first n rows. Execute at once.
:param n:
:return: result frame
:rtype: :class:`odps.df.backends.frame.ResultFrame`
### Response:
def head(self, n=None, **kwargs):
"""
Return the first n rows. Execute at once.
:param n:
:return: result frame
:rtype: :class:`odps.df.backends.frame.ResultFrame`
"""
if n is None:
n = options.display.max_rows
return self._handle_delay_call('execute', self, head=n, **kwargs) |
def label_accuracy_score(label_trues, label_preds, n_class):
"""Returns accuracy score evaluation result.
- overall accuracy
- mean accuracy
- mean IU
- fwavacc
"""
hist = np.zeros((n_class, n_class))
for lt, lp in zip(label_trues, label_preds):
hist += _fast_hist(lt.flatten(), lp.flatten(), n_class)
acc = np.diag(hist).sum() / hist.sum()
with np.errstate(divide='ignore', invalid='ignore'):
acc_cls = np.diag(hist) / hist.sum(axis=1)
acc_cls = np.nanmean(acc_cls)
with np.errstate(divide='ignore', invalid='ignore'):
iu = np.diag(hist) / (
hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist)
)
mean_iu = np.nanmean(iu)
freq = hist.sum(axis=1) / hist.sum()
fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
return acc, acc_cls, mean_iu, fwavacc | Returns accuracy score evaluation result.
- overall accuracy
- mean accuracy
- mean IU
- fwavacc | Below is the instruction that describes the task:
### Input:
Returns accuracy score evaluation result.
- overall accuracy
- mean accuracy
- mean IU
- fwavacc
### Response:
def label_accuracy_score(label_trues, label_preds, n_class):
"""Returns accuracy score evaluation result.
- overall accuracy
- mean accuracy
- mean IU
- fwavacc
"""
hist = np.zeros((n_class, n_class))
for lt, lp in zip(label_trues, label_preds):
hist += _fast_hist(lt.flatten(), lp.flatten(), n_class)
acc = np.diag(hist).sum() / hist.sum()
with np.errstate(divide='ignore', invalid='ignore'):
acc_cls = np.diag(hist) / hist.sum(axis=1)
acc_cls = np.nanmean(acc_cls)
with np.errstate(divide='ignore', invalid='ignore'):
iu = np.diag(hist) / (
hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist)
)
mean_iu = np.nanmean(iu)
freq = hist.sum(axis=1) / hist.sum()
fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
return acc, acc_cls, mean_iu, fwavacc |
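The helper _fast_hist is not shown in this row; a common implementation in FCN-style evaluation code accumulates a per-class confusion matrix with np.bincount, along these lines (a sketch, assuming integer label maps):

```python
import numpy as np

def _fast_hist(label_true, label_pred, n_class):
    # keep only pixels whose ground-truth label is a valid class index
    mask = (label_true >= 0) & (label_true < n_class)
    # encode each (true, pred) pair as a single index and count occurrences
    hist = np.bincount(
        n_class * label_true[mask].astype(int) + label_pred[mask],
        minlength=n_class ** 2,
    ).reshape(n_class, n_class)
    return hist
```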
def _create_filter_by(self):
"""Transform the json-server filter arguments to model-resource ones."""
filter_by = []
for name, values in request.args.copy().lists(): # copy.lists works in py2 and py3
if name not in _SKIPPED_ARGUMENTS:
column = _re_column_name.search(name).group(1)
if column not in self._model_columns:
continue
for value in values:
if name.endswith('_ne'):
filter_by.append(name[:-3] + '!=' + value)
elif name.endswith('_lte'):
filter_by.append(name[:-4] + '<=' + value)
elif name.endswith('_gte'):
filter_by.append(name[:-4] + '>=' + value)
elif name.endswith('_like'):
filter_by.append(name[:-5] + '::like::%' + value + '%')
else:
filter_by.append(name.replace('__', '.') + '==' + value)
filter_by += self._create_fulltext_query()
return ','.join(filter_by) | Transform the json-server filter arguments to model-resource ones. | Below is the instruction that describes the task:
### Input:
Transform the json-server filter arguments to model-resource ones.
### Response:
def _create_filter_by(self):
"""Transform the json-server filter arguments to model-resource ones."""
filter_by = []
for name, values in request.args.copy().lists(): # copy.lists works in py2 and py3
if name not in _SKIPPED_ARGUMENTS:
column = _re_column_name.search(name).group(1)
if column not in self._model_columns:
continue
for value in values:
if name.endswith('_ne'):
filter_by.append(name[:-3] + '!=' + value)
elif name.endswith('_lte'):
filter_by.append(name[:-4] + '<=' + value)
elif name.endswith('_gte'):
filter_by.append(name[:-4] + '>=' + value)
elif name.endswith('_like'):
filter_by.append(name[:-5] + '::like::%' + value + '%')
else:
filter_by.append(name.replace('__', '.') + '==' + value)
filter_by += self._create_fulltext_query()
return ','.join(filter_by) |
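The suffix handling above boils down to a small mapping from json-server style suffixes to comparison operators; an illustrative standalone version of just that translation step:

```python
def to_filter(name, value):
    # json-server style suffixes -> model-resource comparison operators
    if name.endswith('_ne'):
        return name[:-3] + '!=' + value
    if name.endswith('_lte'):
        return name[:-4] + '<=' + value
    if name.endswith('_gte'):
        return name[:-4] + '>=' + value
    if name.endswith('_like'):
        return name[:-5] + '::like::%' + value + '%'
    return name.replace('__', '.') + '==' + value

print(to_filter('age_gte', '21'))        # age>=21
print(to_filter('author__name', 'bob'))  # author.name==bob
```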
def search(self, ctype, level=None, category=None, assetId=None, defId=None,
min_price=None, max_price=None, min_buy=None, max_buy=None,
league=None, club=None, position=None, zone=None, nationality=None,
rare=False, playStyle=None, start=0, page_size=itemsPerPage['transferMarket'],
fast=False):
"""Prepare search request, send and return parsed data as a dict.
:param ctype: [development / ? / ?] Card type.
:param level: (optional) [?/?/gold] Card level.
:param category: (optional) [fitness/?/?] Card category.
:param assetId: (optional) Asset id.
:param defId: (optional) Definition id.
:param min_price: (optional) Minimal price.
:param max_price: (optional) Maximum price.
:param min_buy: (optional) Minimal buy now price.
:param max_buy: (optional) Maximum buy now price.
:param league: (optional) League id.
:param club: (optional) Club id.
:param position: (optional) Position.
:param nationality: (optional) Nation id.
:param rare: (optional) [boolean] True for searching special cards.
:param playStyle: (optional) Play style.
:param start: (optional) Start page sent to server so it is supposed to be 12/15, 24/30 etc. (default platform page_size*n)
:param page_size: (optional) Page size (items per page).
"""
# TODO: add "search" alias
# TODO: generator
method = 'GET'
url = 'transfermarket'
# pinEvents
if start == 0:
events = [self.pin.event('page_view', 'Hub - Transfers'), self.pin.event('page_view', 'Transfer Market Search')]
self.pin.send(events, fast=fast)
params = {
'start': start,
'num': page_size,
'type': ctype, # "type" namespace is reserved in python
}
if level:
params['lev'] = level
if category:
params['cat'] = category
if assetId:
params['maskedDefId'] = assetId
if defId:
params['definitionId'] = defId
if min_price:
params['micr'] = min_price
if max_price:
params['macr'] = max_price
if min_buy:
params['minb'] = min_buy
if max_buy:
params['maxb'] = max_buy
if league:
params['leag'] = league
if club:
params['team'] = club
if position:
params['pos'] = position
if zone:
params['zone'] = zone
if nationality:
params['nat'] = nationality
if rare:
params['rare'] = 'SP'
if playStyle:
params['playStyle'] = playStyle
rc = self.__request__(method, url, params=params, fast=fast)
# pinEvents
if start == 0:
events = [self.pin.event('page_view', 'Transfer Market Results - List View'), self.pin.event('page_view', 'Item - Detail View')]
self.pin.send(events, fast=fast)
return [itemParse(i) for i in rc.get('auctionInfo', ())] | Prepare search request, send and return parsed data as a dict.
:param ctype: [development / ? / ?] Card type.
:param level: (optional) [?/?/gold] Card level.
:param category: (optional) [fitness/?/?] Card category.
:param assetId: (optional) Asset id.
:param defId: (optional) Definition id.
:param min_price: (optional) Minimal price.
:param max_price: (optional) Maximum price.
:param min_buy: (optional) Minimal buy now price.
:param max_buy: (optional) Maximum buy now price.
:param league: (optional) League id.
:param club: (optional) Club id.
:param position: (optional) Position.
:param nationality: (optional) Nation id.
:param rare: (optional) [boolean] True for searching special cards.
:param playStyle: (optional) Play style.
:param start: (optional) Start page sent to server so it is supposed to be 12/15, 24/30 etc. (default platform page_size*n)
:param page_size: (optional) Page size (items per page). | Below is the instruction that describes the task:
### Input:
Prepare search request, send and return parsed data as a dict.
:param ctype: [development / ? / ?] Card type.
:param level: (optional) [?/?/gold] Card level.
:param category: (optional) [fitness/?/?] Card category.
:param assetId: (optional) Asset id.
:param defId: (optional) Definition id.
:param min_price: (optional) Minimal price.
:param max_price: (optional) Maximum price.
:param min_buy: (optional) Minimal buy now price.
:param max_buy: (optional) Maximum buy now price.
:param league: (optional) League id.
:param club: (optional) Club id.
:param position: (optional) Position.
:param nationality: (optional) Nation id.
:param rare: (optional) [boolean] True for searching special cards.
:param playStyle: (optional) Play style.
:param start: (optional) Start page sent to server so it supposed to be 12/15, 24/30 etc. (default platform page_size*n)
:param page_size: (optional) Page size (items per page).
### Response:
def search(self, ctype, level=None, category=None, assetId=None, defId=None,
min_price=None, max_price=None, min_buy=None, max_buy=None,
league=None, club=None, position=None, zone=None, nationality=None,
rare=False, playStyle=None, start=0, page_size=itemsPerPage['transferMarket'],
fast=False):
"""Prepare search request, send and return parsed data as a dict.
:param ctype: [development / ? / ?] Card type.
:param level: (optional) [?/?/gold] Card level.
:param category: (optional) [fitness/?/?] Card category.
:param assetId: (optional) Asset id.
:param defId: (optional) Definition id.
:param min_price: (optional) Minimal price.
:param max_price: (optional) Maximum price.
:param min_buy: (optional) Minimal buy now price.
:param max_buy: (optional) Maximum buy now price.
:param league: (optional) League id.
:param club: (optional) Club id.
:param position: (optional) Position.
:param nationality: (optional) Nation id.
:param rare: (optional) [boolean] True for searching special cards.
:param playStyle: (optional) Play style.
:param start: (optional) Start page sent to server so it is supposed to be 12/15, 24/30 etc. (default platform page_size*n)
:param page_size: (optional) Page size (items per page).
"""
# TODO: add "search" alias
# TODO: generator
method = 'GET'
url = 'transfermarket'
# pinEvents
if start == 0:
events = [self.pin.event('page_view', 'Hub - Transfers'), self.pin.event('page_view', 'Transfer Market Search')]
self.pin.send(events, fast=fast)
params = {
'start': start,
'num': page_size,
'type': ctype, # "type" namespace is reserved in python
}
if level:
params['lev'] = level
if category:
params['cat'] = category
if assetId:
params['maskedDefId'] = assetId
if defId:
params['definitionId'] = defId
if min_price:
params['micr'] = min_price
if max_price:
params['macr'] = max_price
if min_buy:
params['minb'] = min_buy
if max_buy:
params['maxb'] = max_buy
if league:
params['leag'] = league
if club:
params['team'] = club
if position:
params['pos'] = position
if zone:
params['zone'] = zone
if nationality:
params['nat'] = nationality
if rare:
params['rare'] = 'SP'
if playStyle:
params['playStyle'] = playStyle
rc = self.__request__(method, url, params=params, fast=fast)
# pinEvents
if start == 0:
events = [self.pin.event('page_view', 'Transfer Market Results - List View'), self.pin.event('page_view', 'Item - Detail View')]
self.pin.send(events, fast=fast)
return [itemParse(i) for i in rc.get('auctionInfo', ())] |
def sde(self):
"""
Return the state space representation of the covariance.
"""
variance = float(self.variance.values)
lengthscale = float(self.lengthscale)
F = np.array(((-1.0/lengthscale,),))
L = np.array(((1.0,),))
Qc = np.array( ((2.0*variance/lengthscale,),) )
H = np.array(((1.0,),))
Pinf = np.array(((variance,),))
P0 = Pinf.copy()
dF = np.zeros((1,1,2));
dQc = np.zeros((1,1,2));
dPinf = np.zeros((1,1,2));
dF[:,:,0] = 0.0
dF[:,:,1] = 1.0/lengthscale**2
dQc[:,:,0] = 2.0/lengthscale
dQc[:,:,1] = -2.0*variance/lengthscale**2
dPinf[:,:,0] = 1.0
dPinf[:,:,1] = 0.0
dP0 = dPinf.copy()
return (F, L, Qc, H, Pinf, P0, dF, dQc, dPinf, dP0) | Return the state space representation of the covariance. | Below is the instruction that describes the task:
### Input:
Return the state space representation of the covariance.
### Response:
def sde(self):
"""
Return the state space representation of the covariance.
"""
variance = float(self.variance.values)
lengthscale = float(self.lengthscale)
F = np.array(((-1.0/lengthscale,),))
L = np.array(((1.0,),))
Qc = np.array( ((2.0*variance/lengthscale,),) )
H = np.array(((1.0,),))
Pinf = np.array(((variance,),))
P0 = Pinf.copy()
dF = np.zeros((1,1,2));
dQc = np.zeros((1,1,2));
dPinf = np.zeros((1,1,2));
dF[:,:,0] = 0.0
dF[:,:,1] = 1.0/lengthscale**2
dQc[:,:,0] = 2.0/lengthscale
dQc[:,:,1] = -2.0*variance/lengthscale**2
dPinf[:,:,0] = 1.0
dPinf[:,:,1] = 0.0
dP0 = dPinf.copy()
return (F, L, Qc, H, Pinf, P0, dF, dQc, dPinf, dP0) |
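The returned matrices describe the exponential (Ornstein-Uhlenbeck) covariance as a linear SDE, and the stationary covariance Pinf = variance can be checked numerically by solving the continuous Lyapunov equation F*P + P*F^T + L*Qc*L^T = 0; a small sanity-check sketch:

```python
import numpy as np
from scipy.linalg import solve_continuous_lyapunov

variance, lengthscale = 1.5, 0.7
F = np.array([[-1.0 / lengthscale]])
L = np.array([[1.0]])
Qc = np.array([[2.0 * variance / lengthscale]])

# solve F P + P F^T = -L Qc L^T for the stationary covariance P
Pinf = solve_continuous_lyapunov(F, -L @ Qc @ L.T)
print(Pinf)  # ~[[1.5]], i.e. the kernel variance
```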
def get_managers(self):
"""Get managers for the slave environments.
"""
if self._single_env:
return None
if not hasattr(self, '_managers'):
self._managers = self.env.get_slave_managers()
return self._managers | Get managers for the slave environments. | Below is the instruction that describes the task:
### Input:
Get managers for the slave environments.
### Response:
def get_managers(self):
"""Get managers for the slave environments.
"""
if self._single_env:
return None
if not hasattr(self, '_managers'):
self._managers = self.env.get_slave_managers()
return self._managers |
def _add_helpingmaterials(config, helping_file, helping_type):
"""Add helping materials to a project."""
try:
project = find_project_by_short_name(config.project['short_name'],
config.pbclient,
config.all)
data = _load_data(helping_file, helping_type)
if len(data) == 0:
return ("Unknown format for the tasks file. Use json, csv, po or "
"properties.")
# Show progress bar
with click.progressbar(data, label="Adding Helping Materials") as pgbar:
for d in pgbar:
helping_info, file_path = create_helping_material_info(d)
if file_path:
# Create first the media object
hm = config.pbclient.create_helpingmaterial(project_id=project.id,
info=helping_info,
file_path=file_path)
check_api_error(hm)
z = hm.info.copy()
z.update(helping_info)
hm.info = z
response = config.pbclient.update_helping_material(hm)
check_api_error(response)
else:
response = config.pbclient.create_helpingmaterial(project_id=project.id,
info=helping_info)
check_api_error(response)
# Check if for the data we have to auto-throttle task creation
sleep, msg = enable_auto_throttling(config, data,
endpoint='/api/helpinmaterial')
# If true, warn user
if sleep: # pragma: no cover
click.secho(msg, fg='yellow')
# If auto-throttling enabled, sleep for sleep seconds
if sleep: # pragma: no cover
time.sleep(sleep)
return ("%s helping materials added to project: %s" % (len(data),
config.project['short_name']))
except exceptions.ConnectionError:
return ("Connection Error! The server %s is not responding" % config.server)
except (ProjectNotFound, TaskNotFound):
raise | Add helping materials to a project. | Below is the instruction that describes the task:
### Input:
Add helping materials to a project.
### Response:
def _add_helpingmaterials(config, helping_file, helping_type):
"""Add helping materials to a project."""
try:
project = find_project_by_short_name(config.project['short_name'],
config.pbclient,
config.all)
data = _load_data(helping_file, helping_type)
if len(data) == 0:
return ("Unknown format for the tasks file. Use json, csv, po or "
"properties.")
# Show progress bar
with click.progressbar(data, label="Adding Helping Materials") as pgbar:
for d in pgbar:
helping_info, file_path = create_helping_material_info(d)
if file_path:
# Create first the media object
hm = config.pbclient.create_helpingmaterial(project_id=project.id,
info=helping_info,
file_path=file_path)
check_api_error(hm)
z = hm.info.copy()
z.update(helping_info)
hm.info = z
response = config.pbclient.update_helping_material(hm)
check_api_error(response)
else:
response = config.pbclient.create_helpingmaterial(project_id=project.id,
info=helping_info)
check_api_error(response)
# Check if for the data we have to auto-throttle task creation
sleep, msg = enable_auto_throttling(config, data,
endpoint='/api/helpinmaterial')
# If true, warn user
if sleep: # pragma: no cover
click.secho(msg, fg='yellow')
# If auto-throttling enabled, sleep for sleep seconds
if sleep: # pragma: no cover
time.sleep(sleep)
return ("%s helping materials added to project: %s" % (len(data),
config.project['short_name']))
except exceptions.ConnectionError:
return ("Connection Error! The server %s is not responding" % config.server)
except (ProjectNotFound, TaskNotFound):
raise |
def decrypt_email(enc_email):
"""
The inverse of :func:`encrypt_email`.
:param enc_email:
The encrypted email address.
"""
aes = SimpleAES(flask.current_app.config["AES_KEY"])
return aes.decrypt(enc_email) | The inverse of :func:`encrypt_email`.
:param enc_email:
The encrypted email address. | Below is the instruction that describes the task:
### Input:
The inverse of :func:`encrypt_email`.
:param enc_email:
The encrypted email address.
### Response:
def decrypt_email(enc_email):
"""
The inverse of :func:`encrypt_email`.
:param enc_email:
The encrypted email address.
"""
aes = SimpleAES(flask.current_app.config["AES_KEY"])
return aes.decrypt(enc_email) |
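A hypothetical round trip with the matching encrypt_email helper would look like the sketch below; the SimpleAES import path and the symmetric encrypt() call are assumptions here, since only the SimpleAES(key).decrypt() usage is shown in the row above:

```python
from SimpleAES import SimpleAES  # assumed import; only SimpleAES(key).decrypt() appears above

aes = SimpleAES("my-32-char-aes-key")   # hypothetical key material
enc = aes.encrypt("user@example.com")   # assumed counterpart of decrypt()
assert aes.decrypt(enc) == "user@example.com"
```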
def view_task_hazard(token, dstore):
"""
Display info about a given task. Here are a few examples of usage::
$ oq show task_hazard:0 # the fastest task
$ oq show task_hazard:-1 # the slowest task
"""
tasks = set(dstore['task_info'])
if 'source_data' not in dstore:
return 'Missing source_data'
if 'classical_split_filter' in tasks:
data = dstore['task_info/classical_split_filter'].value
else:
data = dstore['task_info/compute_gmfs'].value
data.sort(order='duration')
rec = data[int(token.split(':')[1])]
taskno = rec['taskno']
arr = get_array(dstore['source_data'].value, taskno=taskno)
st = [stats('nsites', arr['nsites']), stats('weight', arr['weight'])]
sources = dstore['task_sources'][taskno - 1].split()
srcs = set(decode(s).split(':', 1)[0] for s in sources)
res = 'taskno=%d, weight=%d, duration=%d s, sources="%s"\n\n' % (
taskno, rec['weight'], rec['duration'], ' '.join(sorted(srcs)))
return res + rst_table(st, header='variable mean stddev min max n'.split()) | Display info about a given task. Here are a few examples of usage::
$ oq show task_hazard:0 # the fastest task
$ oq show task_hazard:-1 # the slowest task | Below is the the instruction that describes the task:
### Input:
Display info about a given task. Here are a few examples of usage::
$ oq show task_hazard:0 # the fastest task
$ oq show task_hazard:-1 # the slowest task
### Response:
def view_task_hazard(token, dstore):
"""
Display info about a given task. Here are a few examples of usage::
$ oq show task_hazard:0 # the fastest task
$ oq show task_hazard:-1 # the slowest task
"""
tasks = set(dstore['task_info'])
if 'source_data' not in dstore:
return 'Missing source_data'
if 'classical_split_filter' in tasks:
data = dstore['task_info/classical_split_filter'].value
else:
data = dstore['task_info/compute_gmfs'].value
data.sort(order='duration')
rec = data[int(token.split(':')[1])]
taskno = rec['taskno']
arr = get_array(dstore['source_data'].value, taskno=taskno)
st = [stats('nsites', arr['nsites']), stats('weight', arr['weight'])]
sources = dstore['task_sources'][taskno - 1].split()
srcs = set(decode(s).split(':', 1)[0] for s in sources)
res = 'taskno=%d, weight=%d, duration=%d s, sources="%s"\n\n' % (
taskno, rec['weight'], rec['duration'], ' '.join(sorted(srcs)))
return res + rst_table(st, header='variable mean stddev min max n'.split()) |
def optimal_marginal_branch_length(self, node, tol=1e-10):
'''
calculate the marginal distribution of sequence states on both ends
of the branch leading to node,
Parameters
----------
node : PhyloTree.Clade
TreeNode, attached to the branch.
Returns
-------
branch_length : float
branch length of the branch leading to the node.
note: this can be unstable on iteration
'''
if node.up is None:
return self.one_mutation
pp, pc = self.marginal_branch_profile(node)
return self.gtr.optimal_t_compressed((pp, pc), self.multiplicity, profiles=True, tol=tol) | calculate the marginal distribution of sequence states on both ends
of the branch leading to node,
Parameters
----------
node : PhyloTree.Clade
TreeNode, attached to the branch.
Returns
-------
branch_length : float
branch length of the branch leading to the node.
note: this can be unstable on iteration | Below is the the instruction that describes the task:
### Input:
calculate the marginal distribution of sequence states on both ends
of the branch leading to node,
Parameters
----------
node : PhyloTree.Clade
TreeNode, attached to the branch.
Returns
-------
branch_length : float
branch length of the branch leading to the node.
note: this can be unstable on iteration
### Response:
def optimal_marginal_branch_length(self, node, tol=1e-10):
'''
calculate the marginal distribution of sequence states on both ends
of the branch leading to node,
Parameters
----------
node : PhyloTree.Clade
TreeNode, attached to the branch.
Returns
-------
branch_length : float
branch length of the branch leading to the node.
note: this can be unstable on iteration
'''
if node.up is None:
return self.one_mutation
pp, pc = self.marginal_branch_profile(node)
return self.gtr.optimal_t_compressed((pp, pc), self.multiplicity, profiles=True, tol=tol) |
def get_trap_definitions():
"""Takes in no param as input to fetch SNMP TRAP definitions from HP IMC RESTFUL API
:param None
:return: object of type list containing the device asset details
"""
# checks to see if the imc credentials are already available
if auth is None or url is None:
set_imc_creds()
global r
get_trap_def_url = "/imcrs/fault/trapDefine/sync/query?enterpriseId=1.3.6.1.4.1.11&size=10000"
f_url = url + get_trap_def_url
payload = None
# creates the URL using the payload variable as the contents
r = requests.get(f_url, auth=auth, headers=headers)
# r.status_code
if r.status_code == 200:
trap_def_list = (json.loads(r.text))
return trap_def_list['trapDefine']
else:
print("get_dev_asset_details: An Error has occured") | Takes in no param as input to fetch SNMP TRAP definitions from HP IMC RESTFUL API
:param None
:return: object of type list containing the device asset details | Below is the the instruction that describes the task:
### Input:
Takes in no param as input to fetch SNMP TRAP definitions from HP IMC RESTFUL API
:param None
:return: object of type list containing the device asset details
### Response:
def get_trap_definitions():
"""Takes in no param as input to fetch SNMP TRAP definitions from HP IMC RESTFUL API
:param None
:return: object of type list containing the device asset details
"""
# checks to see if the imc credentials are already available
if auth is None or url is None:
set_imc_creds()
global r
get_trap_def_url = "/imcrs/fault/trapDefine/sync/query?enterpriseId=1.3.6.1.4.1.11&size=10000"
f_url = url + get_trap_def_url
payload = None
# creates the URL using the payload variable as the contents
r = requests.get(f_url, auth=auth, headers=headers)
# r.status_code
if r.status_code == 200:
trap_def_list = (json.loads(r.text))
return trap_def_list['trapDefine']
else:
print("get_dev_asset_details: An Error has occured") |
def long_input(prompt='Multi-line input\n' + \
'Enter EOF on a blank line to end ' + \
'(ctrl-D in *nix, ctrl-Z in windows)',
maxlines = None, maxlength = None):
"""Get a multi-line string as input"""
lines = []
print(prompt)
lnum = 1
try:
while True:
if maxlines:
if lnum > maxlines:
break
else:
if maxlength:
lines.append(string_input('')[:maxlength])
else:
lines.append(string_input(''))
lnum += 1
else:
if maxlength:
lines.append(string_input('')[:maxlength])
else:
lines.append(string_input(''))
except EOFError:
pass
finally:
return '\n'.join(lines) | Get a multi-line string as input | Below is the the instruction that describes the task:
### Input:
Get a multi-line string as input
### Response:
def long_input(prompt='Multi-line input\n' + \
'Enter EOF on a blank line to end ' + \
'(ctrl-D in *nix, ctrl-Z in windows)',
maxlines = None, maxlength = None):
"""Get a multi-line string as input"""
lines = []
print(prompt)
lnum = 1
try:
while True:
if maxlines:
if lnum > maxlines:
break
else:
if maxlength:
lines.append(string_input('')[:maxlength])
else:
lines.append(string_input(''))
lnum += 1
else:
if maxlength:
lines.append(string_input('')[:maxlength])
else:
lines.append(string_input(''))
except EOFError:
pass
finally:
return '\n'.join(lines) |
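A small illustrative call to long_input as defined above; the prompt text and limits are arbitrary.
# Read at most 5 lines, truncating each line to 80 characters.
notes = long_input(prompt="Enter release notes (EOF to finish):",
                   maxlines=5, maxlength=80)
print(len(notes.splitlines()), "line(s) captured")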
def delete_notes(self, noteids):
"""Delete a note or notes
:param noteids: The noteids to delete
"""
if self.standard_grant_type is not "authorization_code":
raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
response = self._req('/notes/delete', post_data={
'noteids[]' : noteids
})
return response | Delete a note or notes
:param noteids: The noteids to delete | Below is the the instruction that describes the task:
### Input:
Delete a note or notes
:param noteids: The noteids to delete
### Response:
def delete_notes(self, noteids):
"""Delete a note or notes
:param noteids: The noteids to delete
"""
if self.standard_grant_type is not "authorization_code":
raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")
response = self._req('/notes/delete', post_data={
'noteids[]' : noteids
})
return response |
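A hypothetical call; `client` stands for an instance of the enclosing class authenticated with the authorization_code grant, as the method requires, and the note ids are placeholders.
result = client.delete_notes(noteids=["note-id-1", "note-id-2"])
print(result)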
def verify_subscription(request, ident):
"""
Verifies an unverified subscription and creates or appends
to an existing subscription.
"""
try:
unverified = UnverifiedSubscription.objects.get(ident=ident)
except UnverifiedSubscription.DoesNotExist:
return respond('overseer/invalid_subscription_token.html', {}, request)
subscription = Subscription.objects.get_or_create(email=unverified.email, defaults={
'ident': unverified.ident,
})[0]
subscription.services = unverified.services.all()
unverified.delete()
return respond('overseer/subscription_confirmed.html', {
'subscription': subscription,
}, request) | Verifies an unverified subscription and creates or appends
to an existing subscription. | Below is the the instruction that describes the task:
### Input:
Verifies an unverified subscription and creates or appends
to an existing subscription.
### Response:
def verify_subscription(request, ident):
"""
Verifies an unverified subscription and creates or appends
to an existing subscription.
"""
try:
unverified = UnverifiedSubscription.objects.get(ident=ident)
except UnverifiedSubscription.DoesNotExist:
return respond('overseer/invalid_subscription_token.html', {}, request)
subscription = Subscription.objects.get_or_create(email=unverified.email, defaults={
'ident': unverified.ident,
})[0]
subscription.services = unverified.services.all()
unverified.delete()
return respond('overseer/subscription_confirmed.html', {
'subscription': subscription,
}, request) |
def hypermedia_in():
'''
Unserialize POST/PUT data of a specified Content-Type.
The following custom processors all are intended to format Low State data
and will place that data structure into the request object.
:raises HTTPError: if the request contains a Content-Type that we do not
have a processor for
'''
# Be liberal in what you accept
ct_in_map = {
'application/x-www-form-urlencoded': urlencoded_processor,
'application/json': json_processor,
'application/x-yaml': yaml_processor,
'text/yaml': yaml_processor,
'text/plain': text_processor,
}
# Do not process the body for POST requests that have specified no content
# or have not specified Content-Length
if (cherrypy.request.method.upper() == 'POST'
and cherrypy.request.headers.get('Content-Length', '0') == '0'):
cherrypy.request.process_request_body = False
cherrypy.request.unserialized_data = None
cherrypy.request.body.processors.clear()
cherrypy.request.body.default_proc = cherrypy.HTTPError(
406, 'Content type not supported')
cherrypy.request.body.processors = ct_in_map | Unserialize POST/PUT data of a specified Content-Type.
The following custom processors all are intended to format Low State data
and will place that data structure into the request object.
:raises HTTPError: if the request contains a Content-Type that we do not
have a processor for | Below is the the instruction that describes the task:
### Input:
Unserialize POST/PUT data of a specified Content-Type.
The following custom processors all are intended to format Low State data
and will place that data structure into the request object.
:raises HTTPError: if the request contains a Content-Type that we do not
have a processor for
### Response:
def hypermedia_in():
'''
Unserialize POST/PUT data of a specified Content-Type.
The following custom processors all are intended to format Low State data
and will place that data structure into the request object.
:raises HTTPError: if the request contains a Content-Type that we do not
have a processor for
'''
# Be liberal in what you accept
ct_in_map = {
'application/x-www-form-urlencoded': urlencoded_processor,
'application/json': json_processor,
'application/x-yaml': yaml_processor,
'text/yaml': yaml_processor,
'text/plain': text_processor,
}
# Do not process the body for POST requests that have specified no content
# or have not specified Content-Length
if (cherrypy.request.method.upper() == 'POST'
and cherrypy.request.headers.get('Content-Length', '0') == '0'):
cherrypy.request.process_request_body = False
cherrypy.request.unserialized_data = None
cherrypy.request.body.processors.clear()
cherrypy.request.body.default_proc = cherrypy.HTTPError(
406, 'Content type not supported')
cherrypy.request.body.processors = ct_in_map |
def date_from_isoformat(isoformat_date):
"""Convert an ISO-8601 date into a `datetime.date` object.
Argument:
isoformat_date (str): a date in ISO-8601 format (YYYY-MM-DD)
Returns:
~datetime.date: the object corresponding to the given ISO date.
Raises:
ValueError: when the date could not be converted successfully.
See Also:
`ISO-8601 specification <https://en.wikipedia.org/wiki/ISO_8601>`_.
"""
year, month, day = isoformat_date.split('-')
return datetime.date(int(year), int(month), int(day)) | Convert an ISO-8601 date into a `datetime.date` object.
Argument:
isoformat_date (str): a date in ISO-8601 format (YYYY-MM-DD)
Returns:
~datetime.date: the object corresponding to the given ISO date.
Raises:
ValueError: when the date could not be converted successfully.
See Also:
`ISO-8601 specification <https://en.wikipedia.org/wiki/ISO_8601>`_. | Below is the the instruction that describes the task:
### Input:
Convert an ISO-8601 date into a `datetime.date` object.
Argument:
isoformat_date (str): a date in ISO-8601 format (YYYY-MM-DD)
Returns:
~datetime.date: the object corresponding to the given ISO date.
Raises:
ValueError: when the date could not be converted successfully.
See Also:
`ISO-8601 specification <https://en.wikipedia.org/wiki/ISO_8601>`_.
### Response:
def date_from_isoformat(isoformat_date):
"""Convert an ISO-8601 date into a `datetime.date` object.
Argument:
isoformat_date (str): a date in ISO-8601 format (YYYY-MM-DD)
Returns:
~datetime.date: the object corresponding to the given ISO date.
Raises:
ValueError: when the date could not be converted successfully.
See Also:
`ISO-8601 specification <https://en.wikipedia.org/wiki/ISO_8601>`_.
"""
year, month, day = isoformat_date.split('-')
return datetime.date(int(year), int(month), int(day)) |
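A runnable usage sketch for date_from_isoformat covering both the success and the failure path described in the docstring.
import datetime

assert date_from_isoformat("2019-07-01") == datetime.date(2019, 7, 1)
try:
    date_from_isoformat("2019/07/01")   # wrong separator: cannot be split on '-'
except ValueError:
    print("not an ISO-8601 date")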
def get_user():
"""User information.
.. note::
**Privacy note** A user's IP address, user agent string, and user id
(if logged in) is sent to a message queue, where it is stored for about
5 minutes. The information is used to:
- Detect robot visits from the user agent string.
- Generate an anonymized visitor id (using a random salt per day).
- Detect the user's host country based on the IP address.
The information is then discarded.
"""
return dict(
ip_address=request.remote_addr,
user_agent=request.user_agent.string,
user_id=(
current_user.get_id() if current_user.is_authenticated else None
),
session_id=session.get('sid_s')
) | User information.
.. note::
**Privacy note** A user's IP address, user agent string, and user id
(if logged in) is sent to a message queue, where it is stored for about
5 minutes. The information is used to:
- Detect robot visits from the user agent string.
- Generate an anonymized visitor id (using a random salt per day).
- Detect the user's host country based on the IP address.
The information is then discarded. | Below is the the instruction that describes the task:
### Input:
User information.
.. note::
**Privacy note** A users IP address, user agent string, and user id
(if logged in) is sent to a message queue, where it is stored for about
5 minutes. The information is used to:
- Detect robot visits from the user agent string.
- Generate an anonymized visitor id (using a random salt per day).
- Detect the user's host country based on the IP address.
The information is then discarded.
### Response:
def get_user():
"""User information.
.. note::
**Privacy note** A users IP address, user agent string, and user id
(if logged in) is sent to a message queue, where it is stored for about
5 minutes. The information is used to:
- Detect robot visits from the user agent string.
- Generate an anonymized visitor id (using a random salt per day).
- Detect the user's host country based on the IP address.
The information is then discarded.
"""
return dict(
ip_address=request.remote_addr,
user_agent=request.user_agent.string,
user_id=(
current_user.get_id() if current_user.is_authenticated else None
),
session_id=session.get('sid_s')
) |
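get_user only works inside an active Flask request context; a hedged sketch, assuming the app also has Flask-Login (or an equivalent provider of current_user) initialised, which is not shown here.
import flask

app = flask.Flask(__name__)
with app.test_request_context("/", headers={"User-Agent": "tracker-test"}):
    info = get_user()
    # e.g. {'ip_address': ..., 'user_agent': 'tracker-test', 'user_id': None, 'session_id': None}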
def dict_to_dataset(data, *, attrs=None, library=None, coords=None, dims=None):
"""Convert a dictionary of numpy arrays to an xarray.Dataset.
Parameters
----------
data : dict[str] -> ndarray
Data to convert. Keys are variable names.
attrs : dict
Json serializable metadata to attach to the dataset, in addition to defaults.
library : module
Library used for performing inference. Will be attached to the attrs metadata.
coords : dict[str] -> ndarray
Coordinates for the dataset
dims : dict[str] -> list[str]
Dimensions of each variable. The keys are variable names, values are lists of
coordinates.
Returns
-------
xr.Dataset
Examples
--------
dict_to_dataset({'x': np.random.randn(4, 100), 'y': np.random.rand(4, 100)})
"""
if dims is None:
dims = {}
data_vars = {}
for key, values in data.items():
data_vars[key] = numpy_to_data_array(
values, var_name=key, coords=coords, dims=dims.get(key)
)
return xr.Dataset(data_vars=data_vars, attrs=make_attrs(attrs=attrs, library=library)) | Convert a dictionary of numpy arrays to an xarray.Dataset.
Parameters
----------
data : dict[str] -> ndarray
Data to convert. Keys are variable names.
attrs : dict
Json serializable metadata to attach to the dataset, in addition to defaults.
library : module
Library used for performing inference. Will be attached to the attrs metadata.
coords : dict[str] -> ndarray
Coordinates for the dataset
dims : dict[str] -> list[str]
Dimensions of each variable. The keys are variable names, values are lists of
coordinates.
Returns
-------
xr.Dataset
Examples
--------
dict_to_dataset({'x': np.random.randn(4, 100), 'y': np.random.rand(4, 100)}) | Below is the the instruction that describes the task:
### Input:
Convert a dictionary of numpy arrays to an xarray.Dataset.
Parameters
----------
data : dict[str] -> ndarray
Data to convert. Keys are variable names.
attrs : dict
Json serializable metadata to attach to the dataset, in addition to defaults.
library : module
Library used for performing inference. Will be attached to the attrs metadata.
coords : dict[str] -> ndarray
Coordinates for the dataset
dims : dict[str] -> list[str]
Dimensions of each variable. The keys are variable names, values are lists of
coordinates.
Returns
-------
xr.Dataset
Examples
--------
dict_to_dataset({'x': np.random.randn(4, 100), 'y': np.random.rand(4, 100)})
### Response:
def dict_to_dataset(data, *, attrs=None, library=None, coords=None, dims=None):
"""Convert a dictionary of numpy arrays to an xarray.Dataset.
Parameters
----------
data : dict[str] -> ndarray
Data to convert. Keys are variable names.
attrs : dict
Json serializable metadata to attach to the dataset, in addition to defaults.
library : module
Library used for performing inference. Will be attached to the attrs metadata.
coords : dict[str] -> ndarray
Coordinates for the dataset
dims : dict[str] -> list[str]
Dimensions of each variable. The keys are variable names, values are lists of
coordinates.
Returns
-------
xr.Dataset
Examples
--------
dict_to_dataset({'x': np.random.randn(4, 100), 'y': np.random.rand(4, 100)})
"""
if dims is None:
dims = {}
data_vars = {}
for key, values in data.items():
data_vars[key] = numpy_to_data_array(
values, var_name=key, coords=coords, dims=dims.get(key)
)
return xr.Dataset(data_vars=data_vars, attrs=make_attrs(attrs=attrs, library=library)) |
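A runnable version of the docstring example; the shape (4, 100) follows the example and the behaviour of the numpy_to_data_array helper is assumed to match the docstring.
import numpy as np

posterior = dict_to_dataset({"x": np.random.randn(4, 100), "y": np.random.rand(4, 100)})
print(posterior)   # an xarray.Dataset with one data variable per input key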
def transform_and_attach(self,
image_list,
func,
show=True):
"""
Displays the transformed (combined) version of the cross-sections from each image,
(same slice and dimension). So if you input n>=1 images, n slices are obtained
from each image, which are passed to the func (callable) provided, and the
result will be displayed in the corresponding cell of the collage.
Useful applications:
- input two images, a function to overlay edges of one image on the other
- input two images, a function to mix them in a checkerboard pattern
- input one image, a function to saturate the upper half of intensities
(to increase contrast and reveal any subtle ghosting in slices)
func must be able to receive as many arguments as there are elements in image_list.
if your func needs additional parameters, make them keyword arguments, and
use functools.partial to obtain a new callable that takes in just the slices.
Parameters
-----------
image_list : list or ndarray
list of images or a single ndarray
func : callable
function to be applied on the input images (their slices)
to produce a single slice to be displayed.
show : bool
flag to indicate whether to make the collage visible.
"""
if not callable(func):
raise TypeError('func must be callable!')
if not isinstance(image_list, (tuple, list)) and isinstance(image_list, np.ndarray):
image_list = [image_list, ]
if len(image_list) > 1:
shape1 = image_list[0].shape
for ii in range(1, len(image_list)):
if image_list[ii].shape != shape1:
raise ValueError('All images must be of same shape!')
if len(image_list[ii].shape) < 3:
raise ValueError('All images must be at least 3D')
slicer = SlicePicker(image_in=image_list[0],
view_set=self.view_set,
num_slices=self.num_slices)
try:
for img_obj, slice_list in zip(self.images,
slicer.get_slices_multi(image_list)):
img_obj.set_data(func(*slice_list))
except:
self._data_attached = False
raise ValueError('unable to attach mix of given images to current collage')
else:
self._data_attached = True
# show all the axes
if show:
self.show() | Displays the transformed (combined) version of the cross-sections from each image,
(same slice and dimension). So if you input n>=1 images, n slices are obtained
from each image, which are passed to the func (callable) provided, and the
result will be displayed in the corresponding cell of the collage.
Useful applications:
- input two images, a function to overlay edges of one image on the other
- input two images, a function to mix them in a checkerboard pattern
- input one image, a function to saturate the upper half of intensities
(to increase contrast and reveal any subtle ghosting in slices)
func must be able to receive as many arguments as there are elements in image_list.
if your func needs additional parameters, make them keyword arguments, and
use functools.partial to obtain a new callable that takes in just the slices.
Parameters
-----------
image_list : list or ndarray
list of images or a single ndarray
func : callable
function to be applied on the input images (their slices)
to produce a single slice to be displayed.
show : bool
flag to indicate whether to make the collage visible. | Below is the the instruction that describes the task:
### Input:
Displays the transformed (combined) version of the cross-sections from each image,
(same slice and dimension). So if you input n>=1 images, n slices are obtained
from each image, which are passed to the func (callable) provided, and the
result will be displayed in the corresponding cell of the collage.
Useful applications:
- input two images, a function to overlay edges of one image on the other
- input two images, a function to mix them in a checkerboard pattern
- input one image, a function to saturate the upper half of intensities
(to increase contrast and reveal any subtle ghosting in slices)
func must be able to receive as many arguments as there are elements in image_list.
if your func needs additional parameters, make them keyword arguments, and
use functools.partial to obtain a new callable that takes in just the slices.
Parameters
-----------
image_list : list or ndarray
list of images or a single ndarray
func : callable
function to be applied on the input images (their slices)
to produce a single slice to be displayed.
show : bool
flag to indicate whether to make the collage visible.
### Response:
def transform_and_attach(self,
image_list,
func,
show=True):
"""
Displays the transformed (combined) version of the cross-sections from each image,
(same slice and dimension). So if you input n>=1 images, n slices are obtained
from each image, which are passed to the func (callable) provided, and the
result will be displayed in the corresponding cell of the collage.
Useful applications:
- input two images, a function to overlay edges of one image on the other
- input two images, a function to mix them in a checkerboard pattern
- input one image, a function to saturate the upper half of intensities
(to increase contrast and reveal any subtle ghosting in slices)
func must be able to receive as many arguments as there are elements in image_list.
if your func needs additional parameters, make them keyword arguments, and
use functools.partial to obtain a new callable that takes in just the slices.
Parameters
-----------
image_list : list or ndarray
list of images or a single ndarray
func : callable
function to be applied on the input images (their slices)
to produce a single slice to be displayed.
show : bool
flag to indicate whether to make the collage visible.
"""
if not callable(func):
raise TypeError('func must be callable!')
if not isinstance(image_list, (tuple, list)) and isinstance(image_list, np.ndarray):
image_list = [image_list, ]
if len(image_list) > 1:
shape1 = image_list[0].shape
for ii in range(1, len(image_list)):
if image_list[ii].shape != shape1:
raise ValueError('All images must be of same shape!')
if len(image_list[ii].shape) < 3:
raise ValueError('All images must be at least 3D')
slicer = SlicePicker(image_in=image_list[0],
view_set=self.view_set,
num_slices=self.num_slices)
try:
for img_obj, slice_list in zip(self.images,
slicer.get_slices_multi(image_list)):
img_obj.set_data(func(*slice_list))
except:
self._data_attached = False
raise ValueError('unable to attach mix of given images to current collage')
else:
self._data_attached = True
# show all the axes
if show:
self.show() |
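A hypothetical sketch of the blending use case the docstring mentions; `collage`, `img1` and `img2` are assumed to be an existing instance of this class and two same-shape 3D arrays.
from functools import partial

def blend(slice_a, slice_b, alpha=0.5):
    # Mix the two slices; extra parameters are bound with functools.partial.
    return alpha * slice_a + (1.0 - alpha) * slice_b

collage.transform_and_attach([img1, img2], func=partial(blend, alpha=0.3))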
def group_update(auth=None, **kwargs):
'''
Update a group
CLI Example:
.. code-block:: bash
salt '*' keystoneng.group_update name=group1 description='new description'
salt '*' keystoneng.group_create name=group2 domain_id=b62e76fbeeff4e8fb77073f591cf211e new_name=newgroupname
salt '*' keystoneng.group_create name=0e4febc2a5ab4f2c8f374b054162506d new_name=newgroupname
'''
cloud = get_operator_cloud(auth)
kwargs = _clean_kwargs(**kwargs)
if 'new_name' in kwargs:
kwargs['name'] = kwargs.pop('new_name')
return cloud.update_group(**kwargs) | Update a group
CLI Example:
.. code-block:: bash
salt '*' keystoneng.group_update name=group1 description='new description'
salt '*' keystoneng.group_create name=group2 domain_id=b62e76fbeeff4e8fb77073f591cf211e new_name=newgroupname
salt '*' keystoneng.group_create name=0e4febc2a5ab4f2c8f374b054162506d new_name=newgroupname | Below is the the instruction that describes the task:
### Input:
Update a group
CLI Example:
.. code-block:: bash
salt '*' keystoneng.group_update name=group1 description='new description'
salt '*' keystoneng.group_create name=group2 domain_id=b62e76fbeeff4e8fb77073f591cf211e new_name=newgroupname
salt '*' keystoneng.group_create name=0e4febc2a5ab4f2c8f374b054162506d new_name=newgroupname
### Response:
def group_update(auth=None, **kwargs):
'''
Update a group
CLI Example:
.. code-block:: bash
salt '*' keystoneng.group_update name=group1 description='new description'
salt '*' keystoneng.group_create name=group2 domain_id=b62e76fbeeff4e8fb77073f591cf211e new_name=newgroupname
salt '*' keystoneng.group_create name=0e4febc2a5ab4f2c8f374b054162506d new_name=newgroupname
'''
cloud = get_operator_cloud(auth)
kwargs = _clean_kwargs(**kwargs)
if 'new_name' in kwargs:
kwargs['name'] = kwargs.pop('new_name')
return cloud.update_group(**kwargs) |
def list_actions(cls):
"""Get a list of exposed actions that are callable via the
``do_action()`` method."""
# Make sure these are always at the beginning of the list
actions = ['start', 'stop', 'restart', 'status']
# Iterate over the instance attributes checking for actions that
# have been exposed
for func_name in dir(cls):
func = getattr(cls, func_name)
if (not hasattr(func, '__call__') or
not getattr(func, '__daemonocle_exposed__', False)):
# Not a function or not exposed
continue
action = func_name.replace('_', '-')
if action not in actions:
actions.append(action)
return actions | Get a list of exposed actions that are callable via the
``do_action()`` method. | Below is the the instruction that describes the task:
### Input:
Get a list of exposed actions that are callable via the
``do_action()`` method.
### Response:
def list_actions(cls):
"""Get a list of exposed actions that are callable via the
``do_action()`` method."""
# Make sure these are always at the beginning of the list
actions = ['start', 'stop', 'restart', 'status']
# Iterate over the instance attributes checking for actions that
# have been exposed
for func_name in dir(cls):
func = getattr(cls, func_name)
if (not hasattr(func, '__call__') or
not getattr(func, '__daemonocle_exposed__', False)):
# Not a function or not exposed
continue
action = func_name.replace('_', '-')
if action not in actions:
actions.append(action)
return actions |
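list_actions only picks up callables carrying a truthy __daemonocle_exposed__ attribute and turns underscores into dashes; a minimal sketch, where BaseDaemon is a hypothetical stand-in for the class that defines list_actions.
class MyDaemon(BaseDaemon):               # BaseDaemon: hypothetical base providing list_actions
    def reload_config(self):
        """Reload configuration without restarting."""
        pass
    reload_config.__daemonocle_exposed__ = True   # mark the method as exposed

# Expected: ['start', 'stop', 'restart', 'status', 'reload-config']
print(MyDaemon.list_actions())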
def _updateMinDutyCycles(self):
"""
Updates the minimum duty cycles defining normal activity for a column. A
column with activity duty cycle below this minimum threshold is boosted.
"""
if self._globalInhibition or self._inhibitionRadius > self._numInputs:
self._updateMinDutyCyclesGlobal()
else:
self._updateMinDutyCyclesLocal() | Updates the minimum duty cycles defining normal activity for a column. A
column with activity duty cycle below this minimum threshold is boosted. | Below is the the instruction that describes the task:
### Input:
Updates the minimum duty cycles defining normal activity for a column. A
column with activity duty cycle below this minimum threshold is boosted.
### Response:
def _updateMinDutyCycles(self):
"""
Updates the minimum duty cycles defining normal activity for a column. A
column with activity duty cycle below this minimum threshold is boosted.
"""
if self._globalInhibition or self._inhibitionRadius > self._numInputs:
self._updateMinDutyCyclesGlobal()
else:
self._updateMinDutyCyclesLocal() |
def resolve(self, targets, compile_classpath, sources, javadoc, executor):
"""
This is the core function for coursier resolve.
Validation strategy:
1. All targets are going through the `invalidated` to get fingerprinted in the target level.
No cache is fetched at this stage because it is disabled.
2. Once each target is fingerprinted, we combine them into a `VersionedTargetSet` where they
are fingerprinted together, because each run of 3rdparty resolve is context sensitive.
Artifacts are stored in `VersionedTargetSet`'s results_dir, the contents are the aggregation of
each coursier run happened within that context.
Caching: (TODO): https://github.com/pantsbuild/pants/issues/5187
Currently it is disabled due to absolute paths in the coursier results.
:param targets: a collection of targets to do 3rdparty resolve against
:param compile_classpath: classpath product that holds the resolution result. IMPORTANT: this parameter will be changed.
:param sources: if True, fetch sources for 3rdparty
:param javadoc: if True, fetch javadoc for 3rdparty
:param executor: An instance of `pants.java.executor.Executor`. If None, a subprocess executor will be assigned.
:return: n/a
"""
manager = JarDependencyManagement.global_instance()
jar_targets = manager.targets_by_artifact_set(targets)
executor = executor or SubprocessExecutor(DistributionLocator.cached())
if not isinstance(executor, Executor):
raise ValueError('The executor argument must be an Executor instance, given {} of type {}'.format(
executor, type(executor)))
for artifact_set, target_subset in jar_targets.items():
# TODO(wisechengyi): this is the only place we are using IvyUtil method, which isn't specific to ivy really.
raw_jar_deps, global_excludes = IvyUtils.calculate_classpath(target_subset)
# ['sources'] * False = [], ['sources'] * True = ['sources']
confs_for_fingerprint = ['sources'] * sources + ['javadoc'] * javadoc
fp_strategy = CoursierResolveFingerprintStrategy(confs_for_fingerprint)
compile_classpath.add_excludes_for_targets(target_subset)
with self.invalidated(target_subset,
invalidate_dependents=False,
silent=False,
fingerprint_strategy=fp_strategy) as invalidation_check:
if not invalidation_check.all_vts:
continue
resolve_vts = VersionedTargetSet.from_versioned_targets(invalidation_check.all_vts)
vt_set_results_dir = self._prepare_vts_results_dir(resolve_vts)
pants_jar_base_dir = self._prepare_workdir()
coursier_cache_dir = CoursierSubsystem.global_instance().get_options().cache_dir
# If a report is requested, do not proceed with loading validated result.
if not self.get_options().report:
# Check each individual target without context first
# If the individuals are valid, check them as a VersionedTargetSet
if not invalidation_check.invalid_vts and resolve_vts.valid:
# Load up from the results dir
success = self._load_from_results_dir(compile_classpath, vt_set_results_dir,
coursier_cache_dir, invalidation_check, pants_jar_base_dir)
if success:
return
jars_to_resolve, pinned_coords = self._compute_jars_to_resolve_and_pin(raw_jar_deps,
artifact_set,
manager)
results = self._get_result_from_coursier(jars_to_resolve, global_excludes, pinned_coords,
coursier_cache_dir, sources, javadoc, executor)
for conf, result_list in results.items():
for result in result_list:
self._load_json_result(conf, compile_classpath, coursier_cache_dir, invalidation_check,
pants_jar_base_dir, result, self._override_classifiers_for_conf(conf))
self._populate_results_dir(vt_set_results_dir, results)
resolve_vts.update() | This is the core function for coursier resolve.
Validation strategy:
1. All targets are going through the `invalidated` to get fingerprinted in the target level.
No cache is fetched at this stage because it is disabled.
2. Once each target is fingerprinted, we combine them into a `VersionedTargetSet` where they
are fingerprinted together, because each run of 3rdparty resolve is context sensitive.
Artifacts are stored in `VersionedTargetSet`'s results_dir, the contents are the aggregation of
each coursier run happened within that context.
Caching: (TODO): https://github.com/pantsbuild/pants/issues/5187
Currently it is disabled due to absolute paths in the coursier results.
:param targets: a collection of targets to do 3rdparty resolve against
:param compile_classpath: classpath product that holds the resolution result. IMPORTANT: this parameter will be changed.
:param sources: if True, fetch sources for 3rdparty
:param javadoc: if True, fetch javadoc for 3rdparty
:param executor: An instance of `pants.java.executor.Executor`. If None, a subprocess executor will be assigned.
:return: n/a | Below is the the instruction that describes the task:
### Input:
This is the core function for coursier resolve.
Validation strategy:
1. All targets are going through the `invalidated` to get fingerprinted in the target level.
No cache is fetched at this stage because it is disabled.
2. Once each target is fingerprinted, we combine them into a `VersionedTargetSet` where they
are fingerprinted together, because each run of 3rdparty resolve is context sensitive.
Artifacts are stored in `VersionedTargetSet`'s results_dir, the contents are the aggregation of
each coursier run happened within that context.
Caching: (TODO): https://github.com/pantsbuild/pants/issues/5187
Currently it is disabled due to absolute paths in the coursier results.
:param targets: a collection of targets to do 3rdparty resolve against
:param compile_classpath: classpath product that holds the resolution result. IMPORTANT: this parameter will be changed.
:param sources: if True, fetch sources for 3rdparty
:param javadoc: if True, fetch javadoc for 3rdparty
:param executor: An instance of `pants.java.executor.Executor`. If None, a subprocess executor will be assigned.
:return: n/a
### Response:
def resolve(self, targets, compile_classpath, sources, javadoc, executor):
"""
This is the core function for coursier resolve.
Validation strategy:
1. All targets are going through the `invalidated` to get fingerprinted in the target level.
No cache is fetched at this stage because it is disabled.
2. Once each target is fingerprinted, we combine them into a `VersionedTargetSet` where they
are fingerprinted together, because each run of 3rdparty resolve is context sensitive.
Artifacts are stored in `VersionedTargetSet`'s results_dir, the contents are the aggregation of
each coursier run happened within that context.
Caching: (TODO): https://github.com/pantsbuild/pants/issues/5187
Currently it is disabled due to absolute paths in the coursier results.
:param targets: a collection of targets to do 3rdparty resolve against
:param compile_classpath: classpath product that holds the resolution result. IMPORTANT: this parameter will be changed.
:param sources: if True, fetch sources for 3rdparty
:param javadoc: if True, fetch javadoc for 3rdparty
:param executor: An instance of `pants.java.executor.Executor`. If None, a subprocess executor will be assigned.
:return: n/a
"""
manager = JarDependencyManagement.global_instance()
jar_targets = manager.targets_by_artifact_set(targets)
executor = executor or SubprocessExecutor(DistributionLocator.cached())
if not isinstance(executor, Executor):
raise ValueError('The executor argument must be an Executor instance, given {} of type {}'.format(
executor, type(executor)))
for artifact_set, target_subset in jar_targets.items():
# TODO(wisechengyi): this is the only place we are using IvyUtil method, which isn't specific to ivy really.
raw_jar_deps, global_excludes = IvyUtils.calculate_classpath(target_subset)
# ['sources'] * False = [], ['sources'] * True = ['sources']
confs_for_fingerprint = ['sources'] * sources + ['javadoc'] * javadoc
fp_strategy = CoursierResolveFingerprintStrategy(confs_for_fingerprint)
compile_classpath.add_excludes_for_targets(target_subset)
with self.invalidated(target_subset,
invalidate_dependents=False,
silent=False,
fingerprint_strategy=fp_strategy) as invalidation_check:
if not invalidation_check.all_vts:
continue
resolve_vts = VersionedTargetSet.from_versioned_targets(invalidation_check.all_vts)
vt_set_results_dir = self._prepare_vts_results_dir(resolve_vts)
pants_jar_base_dir = self._prepare_workdir()
coursier_cache_dir = CoursierSubsystem.global_instance().get_options().cache_dir
# If a report is requested, do not proceed with loading validated result.
if not self.get_options().report:
# Check each individual target without context first
# If the individuals are valid, check them as a VersionedTargetSet
if not invalidation_check.invalid_vts and resolve_vts.valid:
# Load up from the results dir
success = self._load_from_results_dir(compile_classpath, vt_set_results_dir,
coursier_cache_dir, invalidation_check, pants_jar_base_dir)
if success:
return
jars_to_resolve, pinned_coords = self._compute_jars_to_resolve_and_pin(raw_jar_deps,
artifact_set,
manager)
results = self._get_result_from_coursier(jars_to_resolve, global_excludes, pinned_coords,
coursier_cache_dir, sources, javadoc, executor)
for conf, result_list in results.items():
for result in result_list:
self._load_json_result(conf, compile_classpath, coursier_cache_dir, invalidation_check,
pants_jar_base_dir, result, self._override_classifiers_for_conf(conf))
self._populate_results_dir(vt_set_results_dir, results)
resolve_vts.update() |
def valid_path(path):
'''
Check if an entry in the class path exists as either a directory or a file
'''
# check if the directory prefix of a wildcard classpath entry exists
if path.endswith('*'):
Log.debug('Checking classpath entry suffix as directory: %s', path[:-1])
if os.path.isdir(path[:-1]):
return True
return False
# check if the classpath entry is a directory
Log.debug('Checking classpath entry as directory: %s', path)
if os.path.isdir(path):
return True
else:
# check if the classpath entry is a file
Log.debug('Checking classpath entry as file: %s', path)
if os.path.isfile(path):
return True
return False | Check if an entry in the class path exists as either a directory or a file | Below is the the instruction that describes the task:
### Input:
Check if an entry in the class path exists as either a directory or a file
### Response:
def valid_path(path):
'''
Check if an entry in the class path exists as either a directory or a file
'''
# check if the directory prefix of a wildcard classpath entry exists
if path.endswith('*'):
Log.debug('Checking classpath entry suffix as directory: %s', path[:-1])
if os.path.isdir(path[:-1]):
return True
return False
# check if the classpath entry is a directory
Log.debug('Checking classpath entry as directory: %s', path)
if os.path.isdir(path):
return True
else:
# check if the classpath entry is a file
Log.debug('Checking classpath entry as file: %s', path)
if os.path.isfile(path):
return True
return False |
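Illustrative calls to valid_path; the paths are examples only.
print(valid_path("/usr/lib/jvm/lib/*"))   # wildcard entry: True if /usr/lib/jvm/lib/ is a directory
print(valid_path("/tmp"))                 # plain directory entry
print(valid_path("heron-api.jar"))        # file entry relative to the working directory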
async def fetchone(self):
""" Fetch next row """
self._check_executed()
row = await self._read_next()
if row is None:
return
self._rownumber += 1
return row | Fetch next row | Below is the the instruction that describes the task:
### Input:
Fetch next row
### Response:
async def fetchone(self):
""" Fetch next row """
self._check_executed()
row = await self._read_next()
if row is None:
return
self._rownumber += 1
return row |
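A hypothetical consumption loop; `cur` stands for an already-executed async cursor of this class.
async def print_all_rows(cur):
    # Iterate until fetchone() returns None, i.e. the end of the result set.
    while True:
        row = await cur.fetchone()
        if row is None:
            break
        print(row)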
def load_module(self, fullname):
"""Load a module if its name starts with :code:`self.group` and is registered."""
if fullname in sys.modules:
return sys.modules[fullname]
end_name = fullname[len(self._group_with_dot):]
for entry_point in iter_entry_points(group=self.group, name=end_name):
mod = entry_point.load()
sys.modules[fullname] = mod
return mod | Load a module if its name starts with :code:`self.group` and is registered. | Below is the the instruction that describes the task:
### Input:
Load a module if its name starts with :code:`self.group` and is registered.
### Response:
def load_module(self, fullname):
"""Load a module if its name starts with :code:`self.group` and is registered."""
if fullname in sys.modules:
return sys.modules[fullname]
end_name = fullname[len(self._group_with_dot):]
for entry_point in iter_entry_points(group=self.group, name=end_name):
mod = entry_point.load()
sys.modules[fullname] = mod
return mod |
def circle(radius=None, center=None, **kwargs):
"""
Create a Path2D containing a single circle with the
specified radius and center.
Parameters
--------------
radius : float
Radius of the circle
center : (2,) float
Center of the circle in XY
Returns
-------------
circle : Path2D
Path containing the specified circle
"""
from .path import Path2D
if center is None:
center = [0.0, 0.0]
else:
center = np.asanyarray(center, dtype=np.float64)
if radius is None:
radius = 1.0
else:
radius = float(radius)
# (3, 2) float, points on arc
three = arc.to_threepoint(angles=[0, np.pi],
center=center,
radius=radius) + center
result = Path2D(entities=[entities.Arc(points=np.arange(3), closed=True)],
vertices=three,
**kwargs)
return result | Create a Path2D containing a single circle with the
specified radius and center.
Parameters
--------------
radius : float
Radius of the circle
center : (2,) float
Center of the circle in XY
Returns
-------------
circle : Path2D
Path containing the specified circle | Below is the the instruction that describes the task:
### Input:
Create a Path2D containing a single circle with the
specified radius and center.
Parameters
--------------
radius : float
Radius of the circle
center : (2,) float
Center of the circle in XY
Returns
-------------
circle : Path2D
Path containing the specified circle
### Response:
def circle(radius=None, center=None, **kwargs):
"""
Create a Path2D containing a single circle with the
specified radius and center.
Parameters
--------------
radius : float
Radius of the circle
center : (2,) float
Center of the circle in XY
Returns
-------------
circle : Path2D
Path containing the specified circle
"""
from .path import Path2D
if center is None:
center = [0.0, 0.0]
else:
center = np.asanyarray(center, dtype=np.float64)
if radius is None:
radius = 1.0
else:
radius = float(radius)
# (3, 2) float, points on arc
three = arc.to_threepoint(angles=[0, np.pi],
center=center,
radius=radius) + center
result = Path2D(entities=[entities.Arc(points=np.arange(3), closed=True)],
vertices=three,
**kwargs)
return result |
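Two illustrative calls matching the signature above.
unit = circle()                               # defaults: radius 1.0, centered at the origin
big = circle(radius=3.0, center=[2.0, 0.0])   # explicit radius and center
# Both results are Path2D objects built from a single closed Arc entity.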
def _get_nailgun_client(self, jvm_options, classpath, stdout, stderr, stdin):
"""This (somewhat unfortunately) is the main entrypoint to this class via the Runner. It handles
creation of the running nailgun server as well as creation of the client."""
classpath = self._nailgun_classpath + classpath
new_fingerprint = self._fingerprint(jvm_options, classpath, self._distribution.version)
with self._NAILGUN_SPAWN_LOCK:
running, updated = self._check_nailgun_state(new_fingerprint)
if running and updated:
logger.debug('Found running nailgun server that needs updating, killing {server}'
.format(server=self._identity))
self.terminate()
if (not running) or (running and updated):
return self._spawn_nailgun_server(new_fingerprint, jvm_options, classpath, stdout, stderr, stdin)
return self._create_ngclient(self.socket, stdout, stderr, stdin) | This (somewhat unfortunately) is the main entrypoint to this class via the Runner. It handles
creation of the running nailgun server as well as creation of the client. | Below is the the instruction that describes the task:
### Input:
This (somewhat unfortunately) is the main entrypoint to this class via the Runner. It handles
creation of the running nailgun server as well as creation of the client.
### Response:
def _get_nailgun_client(self, jvm_options, classpath, stdout, stderr, stdin):
"""This (somewhat unfortunately) is the main entrypoint to this class via the Runner. It handles
creation of the running nailgun server as well as creation of the client."""
classpath = self._nailgun_classpath + classpath
new_fingerprint = self._fingerprint(jvm_options, classpath, self._distribution.version)
with self._NAILGUN_SPAWN_LOCK:
running, updated = self._check_nailgun_state(new_fingerprint)
if running and updated:
logger.debug('Found running nailgun server that needs updating, killing {server}'
.format(server=self._identity))
self.terminate()
if (not running) or (running and updated):
return self._spawn_nailgun_server(new_fingerprint, jvm_options, classpath, stdout, stderr, stdin)
return self._create_ngclient(self.socket, stdout, stderr, stdin) |
def readObject(self):
"""read object"""
try:
_, res = self._read_and_exec_opcode(ident=0)
position_bak = self.object_stream.tell()
the_rest = self.object_stream.read()
if len(the_rest):
log_error("Warning!!!!: Stream still has %s bytes left.\
Enable debug mode of logging to see the hexdump." % len(the_rest))
log_debug(self._create_hexdump(the_rest))
else:
log_debug("Java Object unmarshalled succesfully!")
self.object_stream.seek(position_bak)
return res
except Exception:
self._oops_dump_state()
raise | read object | Below is the the instruction that describes the task:
### Input:
read object
### Response:
def readObject(self):
"""read object"""
try:
_, res = self._read_and_exec_opcode(ident=0)
position_bak = self.object_stream.tell()
the_rest = self.object_stream.read()
if len(the_rest):
log_error("Warning!!!!: Stream still has %s bytes left.\
Enable debug mode of logging to see the hexdump." % len(the_rest))
log_debug(self._create_hexdump(the_rest))
else:
log_debug("Java Object unmarshalled succesfully!")
self.object_stream.seek(position_bak)
return res
except Exception:
self._oops_dump_state()
raise |
def _get_environment_details(python_bin: str) -> list:
"""Get information about packages in environment where packages get installed."""
cmd = "{} -m pipdeptree --json".format(python_bin)
output = run_command(cmd, is_json=True).stdout
return [_create_entry(entry) for entry in output] | Get information about packages in environment where packages get installed. | Below is the the instruction that describes the task:
### Input:
Get information about packages in environment where packages get installed.
### Response:
def _get_environment_details(python_bin: str) -> list:
"""Get information about packages in environment where packages get installed."""
cmd = "{} -m pipdeptree --json".format(python_bin)
output = run_command(cmd, is_json=True).stdout
return [_create_entry(entry) for entry in output] |
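An illustrative call; it assumes pipdeptree is importable by the target interpreter, since the helper shells out to `python -m pipdeptree --json`.
import sys

packages = _get_environment_details(sys.executable)
for entry in packages[:3]:
    print(entry)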
def save_attrgetter(self, obj):
"""attrgetter serializer"""
class Dummy(object):
def __init__(self, attrs, index=None):
self.attrs = attrs
self.index = index
def __getattribute__(self, item):
attrs = object.__getattribute__(self, "attrs")
index = object.__getattribute__(self, "index")
if index is None:
index = len(attrs)
attrs.append(item)
else:
attrs[index] = ".".join([attrs[index], item])
return type(self)(attrs, index)
attrs = []
obj(Dummy(attrs))
return self.save_reduce(operator.attrgetter, tuple(attrs)) | attrgetter serializer | Below is the the instruction that describes the task:
### Input:
attrgetter serializer
### Response:
def save_attrgetter(self, obj):
"""attrgetter serializer"""
class Dummy(object):
def __init__(self, attrs, index=None):
self.attrs = attrs
self.index = index
def __getattribute__(self, item):
attrs = object.__getattribute__(self, "attrs")
index = object.__getattribute__(self, "index")
if index is None:
index = len(attrs)
attrs.append(item)
else:
attrs[index] = ".".join([attrs[index], item])
return type(self)(attrs, index)
attrs = []
obj(Dummy(attrs))
return self.save_reduce(operator.attrgetter, tuple(attrs)) |
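The serializer recovers the dotted attribute names by probing the attrgetter with a recording object; the same trick can be checked on its own with this standalone sketch (Probe mirrors the Dummy class above).
import operator

class Probe:
    def __init__(self, attrs, index=None):
        self.attrs = attrs
        self.index = index
    def __getattribute__(self, item):
        attrs = object.__getattribute__(self, "attrs")
        index = object.__getattribute__(self, "index")
        if index is None:
            index = len(attrs)
            attrs.append(item)
        else:
            attrs[index] = ".".join([attrs[index], item])
        return type(self)(attrs, index)

attrs = []
operator.attrgetter("a.b", "c")(Probe(attrs))
print(attrs)                                   # ['a.b', 'c']
rebuilt = operator.attrgetter(*attrs)          # enough to reconstruct the original attrgetter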
def send_sci(self, operation, target, payload, reply=None, synchronous=None, sync_timeout=None,
cache=None, allow_offline=None, wait_for_reconnect=None):
"""Send SCI request to 1 or more targets
:param str operation: The operation is one of {send_message, update_firmware, disconnect, query_firmware_targets,
file_system, data_service, and reboot}
:param target: The device(s) to be targeted with this request
:type target: :class:`~.TargetABC` or list of :class:`~.TargetABC` instances
TODO: document other params
"""
if not isinstance(payload, six.string_types) and not isinstance(payload, six.binary_type):
raise TypeError("payload is required to be a string or bytes")
# validate targets and build targets xml section
try:
iter(target)
targets = target
except TypeError:
targets = [target, ]
if not all(isinstance(t, TargetABC) for t in targets):
raise TypeError("Target(s) must each be instances of TargetABC")
targets_xml = "".join(t.to_xml() for t in targets)
# reply argument
if not isinstance(reply, (type(None), six.string_types)):
raise TypeError("reply must be either None or a string")
if reply is not None:
reply_xml = ' reply="{}"'.format(reply)
else:
reply_xml = ''
# synchronous argument
if not isinstance(synchronous, (type(None), bool)):
raise TypeError("synchronous expected to be either None or a boolean")
if synchronous is not None:
synchronous_xml = ' synchronous="{}"'.format('true' if synchronous else 'false')
else:
synchronous_xml = ''
# sync_timeout argument
# TODO: What units is syncTimeout in? seconds?
if sync_timeout is not None and not isinstance(sync_timeout, six.integer_types):
raise TypeError("sync_timeout expected to either be None or a number")
if sync_timeout is not None:
sync_timeout_xml = ' syncTimeout="{}"'.format(sync_timeout)
else:
sync_timeout_xml = ''
# cache argument
if not isinstance(cache, (type(None), bool)):
raise TypeError("cache expected to either be None or a boolean")
if cache is not None:
cache_xml = ' cache="{}"'.format('true' if cache else 'false')
else:
cache_xml = ''
# allow_offline argument
if not isinstance(allow_offline, (type(None), bool)):
raise TypeError("allow_offline is expected to be either None or a boolean")
if allow_offline is not None:
allow_offline_xml = ' allowOffline="{}"'.format('true' if allow_offline else 'false')
else:
allow_offline_xml = ''
# wait_for_reconnect argument
if not isinstance(wait_for_reconnect, (type(None), bool)):
raise TypeError("wait_for_reconnect expected to be either None or a boolean")
if wait_for_reconnect is not None:
wait_for_reconnect_xml = ' waitForReconnect="{}"'.format('true' if wait_for_reconnect else 'false')
else:
wait_for_reconnect_xml = ''
full_request = SCI_TEMPLATE.format(
operation=operation,
targets=targets_xml,
reply=reply_xml,
synchronous=synchronous_xml,
sync_timeout=sync_timeout_xml,
cache=cache_xml,
allow_offline=allow_offline_xml,
wait_for_reconnect=wait_for_reconnect_xml,
payload=payload
)
# TODO: do parsing here?
return self._conn.post("/ws/sci", full_request) | Send SCI request to 1 or more targets
:param str operation: The operation is one of {send_message, update_firmware, disconnect, query_firmware_targets,
file_system, data_service, and reboot}
:param target: The device(s) to be targeted with this request
:type target: :class:`~.TargetABC` or list of :class:`~.TargetABC` instances
TODO: document other params | Below is the the instruction that describes the task:
### Input:
Send SCI request to 1 or more targets
:param str operation: The operation is one of {send_message, update_firmware, disconnect, query_firmware_targets,
file_system, data_service, and reboot}
:param target: The device(s) to be targeted with this request
:type target: :class:`~.TargetABC` or list of :class:`~.TargetABC` instances
TODO: document other params
### Response:
def send_sci(self, operation, target, payload, reply=None, synchronous=None, sync_timeout=None,
cache=None, allow_offline=None, wait_for_reconnect=None):
"""Send SCI request to 1 or more targets
:param str operation: The operation is one of {send_message, update_firmware, disconnect, query_firmware_targets,
file_system, data_service, and reboot}
:param target: The device(s) to be targeted with this request
:type target: :class:`~.TargetABC` or list of :class:`~.TargetABC` instances
TODO: document other params
"""
if not isinstance(payload, six.string_types) and not isinstance(payload, six.binary_type):
raise TypeError("payload is required to be a string or bytes")
# validate targets and build targets xml section
try:
iter(target)
targets = target
except TypeError:
targets = [target, ]
if not all(isinstance(t, TargetABC) for t in targets):
raise TypeError("Target(s) must each be instances of TargetABC")
targets_xml = "".join(t.to_xml() for t in targets)
# reply argument
if not isinstance(reply, (type(None), six.string_types)):
raise TypeError("reply must be either None or a string")
if reply is not None:
reply_xml = ' reply="{}"'.format(reply)
else:
reply_xml = ''
# synchronous argument
if not isinstance(synchronous, (type(None), bool)):
raise TypeError("synchronous expected to be either None or a boolean")
if synchronous is not None:
synchronous_xml = ' synchronous="{}"'.format('true' if synchronous else 'false')
else:
synchronous_xml = ''
# sync_timeout argument
# TODO: What units is syncTimeout in? seconds?
if sync_timeout is not None and not isinstance(sync_timeout, six.integer_types):
raise TypeError("sync_timeout expected to either be None or a number")
if sync_timeout is not None:
sync_timeout_xml = ' syncTimeout="{}"'.format(sync_timeout)
else:
sync_timeout_xml = ''
# cache argument
if not isinstance(cache, (type(None), bool)):
raise TypeError("cache expected to either be None or a boolean")
if cache is not None:
cache_xml = ' cache="{}"'.format('true' if cache else 'false')
else:
cache_xml = ''
# allow_offline argument
if not isinstance(allow_offline, (type(None), bool)):
raise TypeError("allow_offline is expected to be either None or a boolean")
if allow_offline is not None:
allow_offline_xml = ' allowOffline="{}"'.format('true' if allow_offline else 'false')
else:
allow_offline_xml = ''
# wait_for_reconnect argument
if not isinstance(wait_for_reconnect, (type(None), bool)):
raise TypeError("wait_for_reconnect expected to be either None or a boolean")
if wait_for_reconnect is not None:
wait_for_reconnect_xml = ' waitForReconnect="{}"'.format('true' if wait_for_reconnect else 'false')
else:
wait_for_reconnect_xml = ''
full_request = SCI_TEMPLATE.format(
operation=operation,
targets=targets_xml,
reply=reply_xml,
synchronous=synchronous_xml,
sync_timeout=sync_timeout_xml,
cache=cache_xml,
allow_offline=allow_offline_xml,
wait_for_reconnect=wait_for_reconnect_xml,
payload=payload
)
# TODO: do parsing here?
return self._conn.post("/ws/sci", full_request) |
def classificationgroup(self):
"""List with (subject group ID, number of documents)-tuples."""
path = ['author-profile', 'classificationgroup', 'classifications',
'classification']
out = [(item['$'], item['@frequency']) for item in
listify(chained_get(self._json, path, []))]
return out or None | List with (subject group ID, number of documents)-tuples. | Below is the the instruction that describes the task:
### Input:
List with (subject group ID, number of documents)-tuples.
### Response:
def classificationgroup(self):
"""List with (subject group ID, number of documents)-tuples."""
path = ['author-profile', 'classificationgroup', 'classifications',
'classification']
out = [(item['$'], item['@frequency']) for item in
listify(chained_get(self._json, path, []))]
return out or None |
def inject_instance(self, classkey=None, allow_override=False,
verbose=VERBOSE_CLASS, strict=True):
"""
Injects an instance (self) of type (classkey)
with all functions registered to (classkey)
call this in the __init__ class function
Args:
self: the class instance
classkey: key for a class, preferably the class type itself, but it
doesn't have to be
SeeAlso:
make_class_method_decorator
Example:
>>> # DISABLE_DOCTEST
>>> # DOCTEST_DISABLE
>>> utool.make_class_method_decorator(InvertedIndex)(smk_debug.invindex_dbgstr)
>>> utool.inject_instance(invindex)
"""
import utool as ut
if verbose:
print('[util_class] begin inject_instance')
try:
if classkey is None:
# Probably should deprecate this block of code
# It tries to do too much
classkey = self.__class__
if classkey == 'ibeis.gui.models_and_views.IBEISTableView':
# HACK HACK HACK
# from guitool.__PYQT__ import QtGui # NOQA
from guitool.__PYQT__ import QtWidgets # NOQA
classkey = QtWidgets.QAbstractItemView
if len(__CLASSTYPE_ATTRIBUTES__[classkey]) == 0:
print('[utool] Warning: no classes of type %r are registered' % (classkey,))
print('[utool] type(self)=%r, self=%r' % (type(self), self))
print('[utool] Checking to see if anybody else was registered...')
print('[utool] __CLASSTYPE_ATTRIBUTES__ = ' +
ut.repr4(__CLASSTYPE_ATTRIBUTES__.keys()))
for classtype_, _ in six.iteritems(__CLASSTYPE_ATTRIBUTES__):
isinstance(self, classtype_)
classkey = classtype_
print('[utool] Warning: using subclass=%r' % (classtype_,))
break
func_list = __CLASSTYPE_ATTRIBUTES__[classkey]
if verbose:
print('[util_class] injecting %d methods\n with classkey=%r\n into %r'
% (len(func_list), classkey, self,))
for func in func_list:
if VERBOSE_CLASS:
print('[util_class] * injecting %r' % (func,))
method_name = None
# Allow user to register tuples for aliases
if isinstance(func, tuple):
func, method_name = func
inject_func_as_method(self, func, method_name=method_name,
allow_override=allow_override, verbose=verbose)
except Exception as ex:
ut.printex(ex, 'ISSUE WHEN INJECTING %r' % (classkey,),
iswarning=not strict)
if strict:
raise | Injects an instance (self) of type (classkey)
with all functions registered to (classkey).
Call this in the __init__ class function.
Args:
self: the class instance
classkey: key for a class, preferably the class type itself, but it
doesn't have to be
SeeAlso:
make_class_method_decorator
Example:
>>> # DISABLE_DOCTEST
>>> # DOCTEST_DISABLE
>>> utool.make_class_method_decorator(InvertedIndex)(smk_debug.invindex_dbgstr)
>>> utool.inject_instance(invindex) | Below is the the instruction that describes the task:
### Input:
Injects an instance (self) of type (classkey)
with all functions registered to (classkey).
Call this in the __init__ class function.
Args:
self: the class instance
classkey: key for a class, preferably the class type itself, but it
doesn't have to be
SeeAlso:
make_class_method_decorator
Example:
>>> # DISABLE_DOCTEST
>>> # DOCTEST_DISABLE
>>> utool.make_class_method_decorator(InvertedIndex)(smk_debug.invindex_dbgstr)
>>> utool.inject_instance(invindex)
### Response:
def inject_instance(self, classkey=None, allow_override=False,
verbose=VERBOSE_CLASS, strict=True):
"""
Injects an instance (self) of type (classkey)
with all functions registered to (classkey).
Call this in the __init__ class function.
Args:
self: the class instance
classkey: key for a class, preferably the class type itself, but it
doesn't have to be
SeeAlso:
make_class_method_decorator
Example:
>>> # DISABLE_DOCTEST
>>> # DOCTEST_DISABLE
>>> utool.make_class_method_decorator(InvertedIndex)(smk_debug.invindex_dbgstr)
>>> utool.inject_instance(invindex)
"""
import utool as ut
if verbose:
print('[util_class] begin inject_instance')
try:
if classkey is None:
# Probably should deprecate this block of code
# It tries to do too much
classkey = self.__class__
if classkey == 'ibeis.gui.models_and_views.IBEISTableView':
# HACK HACK HACK
# from guitool.__PYQT__ import QtGui # NOQA
from guitool.__PYQT__ import QtWidgets # NOQA
classkey = QtWidgets.QAbstractItemView
if len(__CLASSTYPE_ATTRIBUTES__[classkey]) == 0:
print('[utool] Warning: no classes of type %r are registered' % (classkey,))
print('[utool] type(self)=%r, self=%r' % (type(self), self))
print('[utool] Checking to see if anybody else was registered...')
print('[utool] __CLASSTYPE_ATTRIBUTES__ = ' +
ut.repr4(__CLASSTYPE_ATTRIBUTES__.keys()))
for classtype_, _ in six.iteritems(__CLASSTYPE_ATTRIBUTES__):
isinstance(self, classtype_)
classkey = classtype_
print('[utool] Warning: using subclass=%r' % (classtype_,))
break
func_list = __CLASSTYPE_ATTRIBUTES__[classkey]
if verbose:
print('[util_class] injecting %d methods\n with classkey=%r\n into %r'
% (len(func_list), classkey, self,))
for func in func_list:
if VERBOSE_CLASS:
print('[util_class] * injecting %r' % (func,))
method_name = None
# Allow user to register tuples for aliases
if isinstance(func, tuple):
func, method_name = func
inject_func_as_method(self, func, method_name=method_name,
allow_override=allow_override, verbose=verbose)
except Exception as ex:
ut.printex(ex, 'ISSUE WHEN INJECTING %r' % (classkey,),
iswarning=not strict)
if strict:
raise |
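The injection above ultimately binds plain functions onto a live instance. A small sketch of that mechanism using only the standard library's types.MethodType; the Widget class and greet function are invented for illustration and are not part of utool.

import types

def greet(self, name):
    # 'self' is whatever instance the function gets bound to
    return "{} says hi to {}".format(self.label, name)

class Widget(object):
    def __init__(self, label):
        self.label = label

w = Widget("w1")
# bind greet onto this one instance, roughly what inject_func_as_method does
w.greet = types.MethodType(greet, w)
print(w.greet("Alice"))   # -> w1 says hi to Alice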
def _coords2vec(self, coords):
"""
Converts from sky coordinates to unit vectors. Before conversion to unit
vectors, the coordinates are transformed to the coordinate system used
internally by the :obj:`UnstructuredDustMap`, which can be set during
initialization of the class.
Args:
coords (:obj:`astropy.coordinates.SkyCoord`): Input coordinates to
convert to unit vectors.
Returns:
Cartesian unit vectors corresponding to the input coordinates, after
transforming to the coordinate system used internally by the
:obj:`UnstructuredDustMap`.
"""
# c = coords.transform_to(self._frame)
# vec = np.empty((c.shape[0], 2), dtype='f8')
# vec[:,0] = coordinates.Longitude(coords.l, wrap_angle=360.*units.deg).deg[:]
# vec[:,1] = coords.b.deg[:]
# return np.radians(vec)
c = coords.transform_to(self._frame).represent_as('cartesian')
vec_norm = np.sqrt(c.x**2 + c.y**2 + c.z**2)
vec = np.empty((c.shape[0], 3), dtype=c.x.dtype)
vec[:,0] = (c.x / vec_norm).value[:]
vec[:,1] = (c.y / vec_norm).value[:]
vec[:,2] = (c.z / vec_norm).value[:]
return vec | Converts from sky coordinates to unit vectors. Before conversion to unit
vectors, the coordinates are transformed to the coordinate system used
internally by the :obj:`UnstructuredDustMap`, which can be set during
initialization of the class.
Args:
coords (:obj:`astropy.coordinates.SkyCoord`): Input coordinates to
convert to unit vectors.
Returns:
Cartesian unit vectors corresponding to the input coordinates, after
transforming to the coordinate system used internally by the
:obj:`UnstructuredDustMap`. | Below is the the instruction that describes the task:
### Input:
Converts from sky coordinates to unit vectors. Before conversion to unit
vectors, the coordinates are transformed to the coordinate system used
internally by the :obj:`UnstructuredDustMap`, which can be set during
initialization of the class.
Args:
coords (:obj:`astropy.coordinates.SkyCoord`): Input coordinates to
convert to unit vectors.
Returns:
Cartesian unit vectors corresponding to the input coordinates, after
transforming to the coordinate system used internally by the
:obj:`UnstructuredDustMap`.
### Response:
def _coords2vec(self, coords):
"""
Converts from sky coordinates to unit vectors. Before conversion to unit
vectors, the coordinates are transformed to the coordinate system used
internally by the :obj:`UnstructuredDustMap`, which can be set during
initialization of the class.
Args:
coords (:obj:`astropy.coordinates.SkyCoord`): Input coordinates to
convert to unit vectors.
Returns:
Cartesian unit vectors corresponding to the input coordinates, after
transforming to the coordinate system used internally by the
:obj:`UnstructuredDustMap`.
"""
# c = coords.transform_to(self._frame)
# vec = np.empty((c.shape[0], 2), dtype='f8')
# vec[:,0] = coordinates.Longitude(coords.l, wrap_angle=360.*units.deg).deg[:]
# vec[:,1] = coords.b.deg[:]
# return np.radians(vec)
c = coords.transform_to(self._frame).represent_as('cartesian')
vec_norm = np.sqrt(c.x**2 + c.y**2 + c.z**2)
vec = np.empty((c.shape[0], 3), dtype=c.x.dtype)
vec[:,0] = (c.x / vec_norm).value[:]
vec[:,1] = (c.y / vec_norm).value[:]
vec[:,2] = (c.z / vec_norm).value[:]
return vec |
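The conversion above goes through astropy frames, but the final step is plain trigonometry: longitude and latitude become normalized Cartesian vectors. A NumPy-only sketch of that step with made-up angles, not tied to any particular frame:

import numpy as np

# sample longitudes/latitudes in degrees (illustrative values only)
lon = np.radians(np.array([0.0, 90.0, 45.0]))
lat = np.radians(np.array([0.0, 0.0, 30.0]))

vec = np.empty((lon.size, 3))
vec[:, 0] = np.cos(lat) * np.cos(lon)
vec[:, 1] = np.cos(lat) * np.sin(lon)
vec[:, 2] = np.sin(lat)

# every row is already a unit vector
print(np.linalg.norm(vec, axis=1))   # -> [1. 1. 1.]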
def _make_request_to_broker(self, broker, requestId, request, **kwArgs):
"""Send a request to the specified broker."""
def _timeout_request(broker, requestId):
"""The time we allotted for the request expired, cancel it."""
try:
# FIXME: This should be done by calling .cancel() on the Deferred
# returned by the broker client.
broker.cancelRequest(requestId, reason=RequestTimedOutError(
'Request: {} cancelled due to timeout'.format(requestId)))
except KeyError: # pragma: no cover This should never happen...
log.exception('ERROR: Failed to find key for timed-out '
'request. Broker: %r Req: %d',
broker, requestId)
raise
if self._disconnect_on_timeout:
broker.disconnect()
def _alert_blocked_reactor(timeout, start):
"""Complain if this timer didn't fire before the timeout elapsed"""
now = self.reactor.seconds()
if now >= (start + timeout):
log.warning('Reactor was starved for %r seconds', now - start)
def _cancel_timeout(result, dc):
"""Request completed/cancelled, cancel the timeout delayedCall."""
if dc.active():
dc.cancel()
return result
# Make the request to the specified broker
log.debug('_mrtb: sending request: %d to broker: %r',
requestId, broker)
d = broker.makeRequest(requestId, request, **kwArgs)
# Set a delayedCall to fire if we don't get a reply in time
dc = self.reactor.callLater(
self.timeout, _timeout_request, broker, requestId)
# Set a delayedCall to complain if the reactor has been blocked
rc = self.reactor.callLater(
(self.timeout * 0.9), _alert_blocked_reactor, self.timeout,
self.reactor.seconds())
# Setup a callback on the request deferred to cancel both callLater
d.addBoth(_cancel_timeout, dc)
d.addBoth(_cancel_timeout, rc)
return d | Send a request to the specified broker. | Below is the the instruction that describes the task:
### Input:
Send a request to the specified broker.
### Response:
def _make_request_to_broker(self, broker, requestId, request, **kwArgs):
"""Send a request to the specified broker."""
def _timeout_request(broker, requestId):
"""The time we allotted for the request expired, cancel it."""
try:
# FIXME: This should be done by calling .cancel() on the Deferred
# returned by the broker client.
broker.cancelRequest(requestId, reason=RequestTimedOutError(
'Request: {} cancelled due to timeout'.format(requestId)))
except KeyError: # pragma: no cover This should never happen...
log.exception('ERROR: Failed to find key for timed-out '
'request. Broker: %r Req: %d',
broker, requestId)
raise
if self._disconnect_on_timeout:
broker.disconnect()
def _alert_blocked_reactor(timeout, start):
"""Complain if this timer didn't fire before the timeout elapsed"""
now = self.reactor.seconds()
if now >= (start + timeout):
log.warning('Reactor was starved for %r seconds', now - start)
def _cancel_timeout(result, dc):
"""Request completed/cancelled, cancel the timeout delayedCall."""
if dc.active():
dc.cancel()
return result
# Make the request to the specified broker
log.debug('_mrtb: sending request: %d to broker: %r',
requestId, broker)
d = broker.makeRequest(requestId, request, **kwArgs)
# Set a delayedCall to fire if we don't get a reply in time
dc = self.reactor.callLater(
self.timeout, _timeout_request, broker, requestId)
# Set a delayedCall to complain if the reactor has been blocked
rc = self.reactor.callLater(
(self.timeout * 0.9), _alert_blocked_reactor, self.timeout,
self.reactor.seconds())
# Setup a callback on the request deferred to cancel both callLater
d.addBoth(_cancel_timeout, dc)
d.addBoth(_cancel_timeout, rc)
return d |
def OnRemoveReaders(self, removedreaders):
"""Called when a reader is removed.
Removes the reader from the smartcard readers tree."""
self.mutex.acquire()
try:
parentnode = self.root
for readertoremove in removedreaders:
(childReader, cookie) = self.GetFirstChild(parentnode)
while childReader.IsOk():
if self.GetItemText(childReader) == str(readertoremove):
self.Delete(childReader)
else:
(childReader, cookie) = \
self.GetNextChild(parentnode, cookie)
self.Expand(self.root)
finally:
self.mutex.release()
self.EnsureVisible(self.root)
self.Repaint() | Called when a reader is removed.
Removes the reader from the smartcard readers tree. | Below is the the instruction that describes the task:
### Input:
Called when a reader is removed.
Removes the reader from the smartcard readers tree.
### Response:
def OnRemoveReaders(self, removedreaders):
"""Called when a reader is removed.
Removes the reader from the smartcard readers tree."""
self.mutex.acquire()
try:
parentnode = self.root
for readertoremove in removedreaders:
(childReader, cookie) = self.GetFirstChild(parentnode)
while childReader.IsOk():
if self.GetItemText(childReader) == str(readertoremove):
self.Delete(childReader)
else:
(childReader, cookie) = \
self.GetNextChild(parentnode, cookie)
self.Expand(self.root)
finally:
self.mutex.release()
self.EnsureVisible(self.root)
self.Repaint() |
def _fusion_to_dsl(tokens) -> FusionBase:
"""Convert a PyParsing data dictionary to a PyBEL fusion data dictionary.
:param tokens: A PyParsing data dictionary representing a fusion
:type tokens: ParseResult
"""
func = tokens[FUNCTION]
fusion_dsl = FUNC_TO_FUSION_DSL[func]
member_dsl = FUNC_TO_DSL[func]
partner_5p = member_dsl(
namespace=tokens[FUSION][PARTNER_5P][NAMESPACE],
name=tokens[FUSION][PARTNER_5P][NAME]
)
partner_3p = member_dsl(
namespace=tokens[FUSION][PARTNER_3P][NAMESPACE],
name=tokens[FUSION][PARTNER_3P][NAME]
)
range_5p = _fusion_range_to_dsl(tokens[FUSION][RANGE_5P])
range_3p = _fusion_range_to_dsl(tokens[FUSION][RANGE_3P])
return fusion_dsl(
partner_5p=partner_5p,
partner_3p=partner_3p,
range_5p=range_5p,
range_3p=range_3p,
) | Convert a PyParsing data dictionary to a PyBEL fusion data dictionary.
:param tokens: A PyParsing data dictionary representing a fusion
:type tokens: ParseResult | Below is the the instruction that describes the task:
### Input:
Convert a PyParsing data dictionary to a PyBEL fusion data dictionary.
:param tokens: A PyParsing data dictionary representing a fusion
:type tokens: ParseResult
### Response:
def _fusion_to_dsl(tokens) -> FusionBase:
"""Convert a PyParsing data dictionary to a PyBEL fusion data dictionary.
:param tokens: A PyParsing data dictionary representing a fusion
:type tokens: ParseResult
"""
func = tokens[FUNCTION]
fusion_dsl = FUNC_TO_FUSION_DSL[func]
member_dsl = FUNC_TO_DSL[func]
partner_5p = member_dsl(
namespace=tokens[FUSION][PARTNER_5P][NAMESPACE],
name=tokens[FUSION][PARTNER_5P][NAME]
)
partner_3p = member_dsl(
namespace=tokens[FUSION][PARTNER_3P][NAMESPACE],
name=tokens[FUSION][PARTNER_3P][NAME]
)
range_5p = _fusion_range_to_dsl(tokens[FUSION][RANGE_5P])
range_3p = _fusion_range_to_dsl(tokens[FUSION][RANGE_3P])
return fusion_dsl(
partner_5p=partner_5p,
partner_3p=partner_3p,
range_5p=range_5p,
range_3p=range_3p,
) |
def _get_geocoding(self, key, location):
"""Lookup the Google geocoding API information for `key`"""
url = self._location_query_base % quote_plus(key)
if self.api_key:
url += "&key=%s" % self.api_key
data = self._read_from_url(url)
response = json.loads(data)
if response["status"] == "OK":
formatted_address = response["results"][0]["formatted_address"]
pos = formatted_address.find(",")
if pos == -1:
location.name = formatted_address
location.region = ""
else:
location.name = formatted_address[:pos].strip()
location.region = formatted_address[pos + 1 :].strip()
geo_location = response["results"][0]["geometry"]["location"]
location.latitude = float(geo_location["lat"])
location.longitude = float(geo_location["lng"])
else:
raise AstralError("GoogleGeocoder: Unable to locate %s. Server Response=%s" %
(key, response["status"])) | Lookup the Google geocoding API information for `key` | Below is the the instruction that describes the task:
### Input:
Lookup the Google geocoding API information for `key`
### Response:
def _get_geocoding(self, key, location):
"""Lookup the Google geocoding API information for `key`"""
url = self._location_query_base % quote_plus(key)
if self.api_key:
url += "&key=%s" % self.api_key
data = self._read_from_url(url)
response = json.loads(data)
if response["status"] == "OK":
formatted_address = response["results"][0]["formatted_address"]
pos = formatted_address.find(",")
if pos == -1:
location.name = formatted_address
location.region = ""
else:
location.name = formatted_address[:pos].strip()
location.region = formatted_address[pos + 1 :].strip()
geo_location = response["results"][0]["geometry"]["location"]
location.latitude = float(geo_location["lat"])
location.longitude = float(geo_location["lng"])
else:
raise AstralError("GoogleGeocoder: Unable to locate %s. Server Response=%s" %
(key, response["status"])) |
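The part worth isolating above is how the formatted address is split into a name and a region. A stand-alone sketch of that parsing on a hand-written dict shaped like the Google geocoding payload; the sample values are invented.

response = {
    "status": "OK",
    "results": [{
        "formatted_address": "London, UK",
        "geometry": {"location": {"lat": 51.5074, "lng": -0.1278}},
    }],
}
formatted_address = response["results"][0]["formatted_address"]
pos = formatted_address.find(",")
name = formatted_address if pos == -1 else formatted_address[:pos].strip()
region = "" if pos == -1 else formatted_address[pos + 1:].strip()
geo = response["results"][0]["geometry"]["location"]
print(name, region, float(geo["lat"]), float(geo["lng"]))   # -> London UK 51.5074 -0.1278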
def kernels_list(self, **kwargs): # noqa: E501
"""List kernels # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.kernels_list(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int page: Page number
:param int page_size: Page size
:param str search: Search terms
:param str group: Display only your kernels
:param str user: Display kernels by a particular user
:param str language: Display kernels in a specific language
:param str kernel_type: Display kernels of a specific type
:param str output_type: Display kernels with a specific output type
:param str sort_by: Sort the results. 'relevance' only works if there is a search query
:param str dataset: Display kernels using the specified dataset
:param str competition: Display kernels using the specified competition
:param str parent_kernel: Display kernels that have forked the specified kernel
:return: Result
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.kernels_list_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.kernels_list_with_http_info(**kwargs) # noqa: E501
return data | List kernels # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.kernels_list(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int page: Page number
:param int page_size: Page size
:param str search: Search terms
:param str group: Display only your kernels
:param str user: Display kernels by a particular user
:param str language: Display kernels in a specific language
:param str kernel_type: Display kernels of a specific type
:param str output_type: Display kernels with a specific output type
:param str sort_by: Sort the results. 'relevance' only works if there is a search query
:param str dataset: Display kernels using the specified dataset
:param str competition: Display kernels using the specified competition
:param str parent_kernel: Display kernels that have forked the specified kernel
:return: Result
If the method is called asynchronously,
returns the request thread. | Below is the the instruction that describes the task:
### Input:
List kernels # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.kernels_list(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int page: Page number
:param int page_size: Page size
:param str search: Search terms
:param str group: Display only your kernels
:param str user: Display kernels by a particular group
:param str language: Display kernels in a specific language
:param str kernel_type: Display kernels of a specific type
:param str output_type: Display kernels with a specific output type
:param str sort_by: Sort the results. 'relevance' only works if there is a search query
:param str dataset: Display kernels using the specified dataset
:param str competition: Display kernels using the specified competition
:param str parent_kernel: Display kernels that have forked the specified kernel
:return: Result
If the method is called asynchronously,
returns the request thread.
### Response:
def kernels_list(self, **kwargs): # noqa: E501
"""List kernels # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.kernels_list(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int page: Page number
:param int page_size: Page size
:param str search: Search terms
:param str group: Display only your kernels
:param str user: Display kernels by a particular user
:param str language: Display kernels in a specific language
:param str kernel_type: Display kernels of a specific type
:param str output_type: Display kernels with a specific output type
:param str sort_by: Sort the results. 'relevance' only works if there is a search query
:param str dataset: Display kernels using the specified dataset
:param str competition: Display kernels using the specified competition
:param str parent_kernel: Display kernels that have forked the specified kernel
:return: Result
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.kernels_list_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.kernels_list_with_http_info(**kwargs) # noqa: E501
return data |
def addslashes(s, escaped_chars=None):
"""Add slashes for given characters. Default is for ``\`` and ``'``.
:param s: string
:param escaped_chars: list of characters to prefix with a slash ``\``
:return: string with slashed characters
:rtype: str
:Example:
>>> addslashes("'")
"\\'"
"""
if escaped_chars is None:
escaped_chars = ["\\", "'", ]
# l = ["\\", '"', "'", "\0", ]
for i in escaped_chars:
if i in s:
s = s.replace(i, '\\' + i)
return s | Add slashes for given characters. Default is for ``\`` and ``'``.
:param s: string
:param escaped_chars: list of characters to prefix with a slash ``\``
:return: string with slashed characters
:rtype: str
:Example:
>>> addslashes("'")
"\\'" | Below is the the instruction that describes the task:
### Input:
Add slashes for given characters. Default is for ``\`` and ``'``.
:param s: string
:param escaped_chars: list of characters to prefix with a slash ``\``
:return: string with slashed characters
:rtype: str
:Example:
>>> addslashes("'")
"\\'"
### Response:
def addslashes(s, escaped_chars=None):
"""Add slashes for given characters. Default is for ``\`` and ``'``.
:param s: string
:param escaped_chars: list of characters to prefix with a slash ``\``
:return: string with slashed characters
:rtype: str
:Example:
>>> addslashes("'")
"\\'"
"""
if escaped_chars is None:
escaped_chars = ["\\", "'", ]
# l = ["\\", '"', "'", "\0", ]
for i in escaped_chars:
if i in s:
s = s.replace(i, '\\' + i)
return s |
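A short usage sketch, assuming the addslashes function defined above is in scope; the strings are arbitrary examples.

# default behaviour: backslash and single quote get escaped
print(addslashes("it's"))              # -> it\'s
print(addslashes("a\\b"))              # -> a\\b
# custom character set: escape double quotes only
print(addslashes('say "hi"', ['"']))   # -> say \"hi\"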
def submit(recaptcha_response_field,
secret_key,
remoteip,
verify_server=VERIFY_SERVER):
"""
Submits a reCAPTCHA request for verification. Returns RecaptchaResponse
for the request
recaptcha_response_field -- The value from the form
secret_key -- your reCAPTCHA secret key
remoteip -- the user's ip address
"""
if not (recaptcha_response_field and len(recaptcha_response_field)):
return RecaptchaResponse(
is_valid=False,
error_code='incorrect-captcha-sol'
)
def encode_if_necessary(s):
if isinstance(s, unicode):
return s.encode('utf-8')
return s
params = urllib.urlencode({
'secret': encode_if_necessary(secret_key),
'remoteip': encode_if_necessary(remoteip),
'response': encode_if_necessary(recaptcha_response_field),
})
request = Request(
url="https://%s/recaptcha/api/siteverify" % verify_server,
data=params,
headers={
"Content-type": "application/x-www-form-urlencoded",
"User-agent": "noReCAPTCHA Python"
}
)
httpresp = urlopen(request)
return_values = json.loads(httpresp.read())
httpresp.close()
return_code = return_values['success']
error_codes = return_values.get('error-codes', [])
if return_code:
return RecaptchaResponse(is_valid=True)
else:
return RecaptchaResponse(
is_valid=False,
error_code=error_codes
) | Submits a reCAPTCHA request for verification. Returns RecaptchaResponse
for the request
recaptcha_response_field -- The value from the form
secret_key -- your reCAPTCHA secret key
remoteip -- the user's ip address | Below is the the instruction that describes the task:
### Input:
Submits a reCAPTCHA request for verification. Returns RecaptchaResponse
for the request
recaptcha_response_field -- The value from the form
secret_key -- your reCAPTCHA secret key
remoteip -- the user's ip address
### Response:
def submit(recaptcha_response_field,
secret_key,
remoteip,
verify_server=VERIFY_SERVER):
"""
Submits a reCAPTCHA request for verification. Returns RecaptchaResponse
for the request
recaptcha_response_field -- The value from the form
secret_key -- your reCAPTCHA secret key
remoteip -- the user's ip address
"""
if not (recaptcha_response_field and len(recaptcha_response_field)):
return RecaptchaResponse(
is_valid=False,
error_code='incorrect-captcha-sol'
)
def encode_if_necessary(s):
if isinstance(s, unicode):
return s.encode('utf-8')
return s
params = urllib.urlencode({
'secret': encode_if_necessary(secret_key),
'remoteip': encode_if_necessary(remoteip),
'response': encode_if_necessary(recaptcha_response_field),
})
request = Request(
url="https://%s/recaptcha/api/siteverify" % verify_server,
data=params,
headers={
"Content-type": "application/x-www-form-urlencoded",
"User-agent": "noReCAPTCHA Python"
}
)
httpresp = urlopen(request)
return_values = json.loads(httpresp.read())
httpresp.close()
return_code = return_values['success']
error_codes = return_values.get('error-codes', [])
if return_code:
return RecaptchaResponse(is_valid=True)
else:
return RecaptchaResponse(
is_valid=False,
error_code=error_codes
) |
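The snippet above is written for Python 2 (unicode, urllib.urlencode). The same request/response handling in Python 3 standard-library terms looks roughly like the sketch below; the secret key, response token and IP are placeholders, and the Google siteverify URL is assumed to be the endpoint the VERIFY_SERVER constant normally points at.

import json
from urllib.parse import urlencode
from urllib.request import Request, urlopen

params = urlencode({
    "secret": "your-secret-key",          # placeholder
    "remoteip": "203.0.113.7",            # placeholder
    "response": "token-from-the-form",    # placeholder
}).encode("utf-8")

req = Request(
    "https://www.google.com/recaptcha/api/siteverify",
    data=params,
    headers={"Content-type": "application/x-www-form-urlencoded"},
)
with urlopen(req) as resp:
    result = json.loads(resp.read().decode("utf-8"))

is_valid = result["success"]
errors = result.get("error-codes", [])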
def _connect_docker(spec):
"""
Return ContextService arguments for a Docker connection.
"""
return {
'method': 'docker',
'kwargs': {
'username': spec.remote_user(),
'container': spec.remote_addr(),
'python_path': spec.python_path(),
'connect_timeout': spec.ansible_ssh_timeout() or spec.timeout(),
'remote_name': get_remote_name(spec),
}
} | Return ContextService arguments for a Docker connection. | Below is the the instruction that describes the task:
### Input:
Return ContextService arguments for a Docker connection.
### Response:
def _connect_docker(spec):
"""
Return ContextService arguments for a Docker connection.
"""
return {
'method': 'docker',
'kwargs': {
'username': spec.remote_user(),
'container': spec.remote_addr(),
'python_path': spec.python_path(),
'connect_timeout': spec.ansible_ssh_timeout() or spec.timeout(),
'remote_name': get_remote_name(spec),
}
} |
def form_invalid(self, form):
'''Builds the JSON for the errors'''
response = {self.errors_key: {}}
response[self.non_field_errors_key] = form.non_field_errors()
response.update(self.get_hidden_fields_errors(form))
for field in form.visible_fields():
if field.errors:
response[self.errors_key][field.html_name] = self._get_field_error_dict(field)
if self.include_success:
response[self.sucess_key] = False
return self._render_json(response) | Builds the JSON for the errors | Below is the the instruction that describes the task:
### Input:
Builds the JSON for the errors
### Response:
def form_invalid(self, form):
'''Builds the JSON for the errors'''
response = {self.errors_key: {}}
response[self.non_field_errors_key] = form.non_field_errors()
response.update(self.get_hidden_fields_errors(form))
for field in form.visible_fields():
if field.errors:
response[self.errors_key][field.html_name] = self._get_field_error_dict(field)
if self.include_success:
response[self.sucess_key] = False
return self._render_json(response) |
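The JSON produced above has a fixed outer shape: a per-field error map, a list of non-field errors, and an optional success flag. A plain-dict sketch of that shape; the field names and messages are invented, and the per-field value really comes from _get_field_error_dict, whose exact layout is not shown above.

import json

payload = {
    "errors": {
        "email": {"message": "Enter a valid email address."},
        "age": {"message": "This field is required."},
    },
    "non_field_errors": ["Passwords do not match."],
    "success": False,
}
print(json.dumps(payload, indent=2))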
def _extract_elements(self, tree, element_type):
"""
extracts all elements of type `element_type` from the `_ElementTree`
representation of a SaltXML document and adds them to the corresponding
`SaltDocument` attributes, i.e. `self.nodes`, `self.edges` and
`self.layers`.
Parameters
----------
tree : lxml.etree._ElementTree
an ElementTree that represents a complete SaltXML document
element_type : str
the tag name of a SaltXML element, e.g. `nodes` or `edges`
"""
# creates a new attribute, e.g. 'self.nodes' and assigns it an
# empty list
setattr(self, element_type, [])
etree_elements = get_elements(tree, element_type)
for i, etree_element in enumerate(etree_elements):
# create an instance of an element class (e.g. TokenNode)
salt_element = create_class_instance(etree_element, i, self.doc_id)
# and add it to the corresponding element type list,
# e.g. 'self.nodes'
getattr(self, element_type).append(salt_element) | extracts all elements of type `element_type` from the `_ElementTree`
representation of a SaltXML document and adds them to the corresponding
`SaltDocument` attributes, i.e. `self.nodes`, `self.edges` and
`self.layers`.
Parameters
----------
tree : lxml.etree._ElementTree
an ElementTree that represents a complete SaltXML document
element_type : str
the tag name of a SaltXML element, e.g. `nodes` or `edges` | Below is the the instruction that describes the task:
### Input:
extracts all elements of type `element_type` from the `_ElementTree`
representation of a SaltXML document and adds them to the corresponding
`SaltDocument` attributes, i.e. `self.nodes`, `self.edges` and
`self.layers`.
Parameters
----------
tree : lxml.etree._ElementTree
an ElementTree that represents a complete SaltXML document
element_type : str
the tag name of a SaltXML element, e.g. `nodes` or `edges`
### Response:
def _extract_elements(self, tree, element_type):
"""
extracts all elements of type `element_type` from the `_ElementTree`
representation of a SaltXML document and adds them to the corresponding
`SaltDocument` attributes, i.e. `self.nodes`, `self.edges` and
`self.layers`.
Parameters
----------
tree : lxml.etree._ElementTree
an ElementTree that represents a complete SaltXML document
element_type : str
the tag name of a SaltXML element, e.g. `nodes` or `edges`
"""
# creates a new attribute, e.g. 'self.nodes' and assigns it an
# empty list
setattr(self, element_type, [])
etree_elements = get_elements(tree, element_type)
for i, etree_element in enumerate(etree_elements):
# create an instance of an element class (e.g. TokenNode)
salt_element = create_class_instance(etree_element, i, self.doc_id)
# and add it to the corresponding element type list,
# e.g. 'self.nodes'
getattr(self, element_type).append(salt_element) |
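A self-contained sketch of the same pattern (find every element with a given tag and build one wrapper object per element) using only the standard library's xml.etree.ElementTree; the document and the wrapper class are invented and far simpler than SaltXML.

import xml.etree.ElementTree as ET

# build a tiny document programmatically instead of spelling out raw XML
root = ET.Element("document")
ET.SubElement(root, "nodes", id="n1")
ET.SubElement(root, "nodes", id="n2")
ET.SubElement(root, "edges", id="e1")

class Wrapper(object):
    def __init__(self, element, index):
        self.xml_id = element.get("id")
        self.index = index

def extract_elements(tree, element_type):
    return [Wrapper(el, i) for i, el in enumerate(tree.findall(element_type))]

nodes = extract_elements(root, "nodes")
print([(n.xml_id, n.index) for n in nodes])   # -> [('n1', 0), ('n2', 1)]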
def get_gateway_id(self):
"""Return a unique id for the gateway."""
host, _ = self.server_address
try:
ip_address = ipaddress.ip_address(host)
except ValueError:
# Only hosts using ip address supports unique id.
return None
if ip_address.version == 6:
mac = get_mac_address(ip6=host)
else:
mac = get_mac_address(ip=host)
return mac | Return a unique id for the gateway. | Below is the the instruction that describes the task:
### Input:
Return a unique id for the gateway.
### Response:
def get_gateway_id(self):
"""Return a unique id for the gateway."""
host, _ = self.server_address
try:
ip_address = ipaddress.ip_address(host)
except ValueError:
# Only hosts using ip address supports unique id.
return None
if ip_address.version == 6:
mac = get_mac_address(ip6=host)
else:
mac = get_mac_address(ip=host)
return mac |
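The branching above hinges on ipaddress.ip_address raising ValueError for anything that is not an IP literal and exposing .version otherwise. A standard-library-only sketch of that part (the actual getmac lookup is left out); the sample hosts are arbitrary.

import ipaddress

for host in ["192.0.2.10", "fe80::1", "gateway.local"]:
    try:
        ip = ipaddress.ip_address(host)
    except ValueError:
        print(host, "-> not an IP literal, so no unique id")
        continue
    lookup_kwargs = {"ip6": host} if ip.version == 6 else {"ip": host}
    print(host, "-> would call get_mac_address(**%r)" % (lookup_kwargs,))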
def from_pubkey(cls, pubkey, compressed=False, version=56, prefix=None):
# Ensure this is a public key
pubkey = PublicKey(pubkey)
if compressed:
pubkey = pubkey.compressed()
else:
pubkey = pubkey.uncompressed()
""" Derive address using ``RIPEMD160(SHA256(x))`` """
addressbin = ripemd160(hexlify(hashlib.sha256(unhexlify(pubkey)).digest()))
return cls(hexlify(addressbin).decode("ascii")) | Derive address using ``RIPEMD160(SHA256(x))`` | Below is the the instruction that describes the task:
### Input:
Derive address using ``RIPEMD160(SHA256(x))``
### Response:
def from_pubkey(cls, pubkey, compressed=False, version=56, prefix=None):
# Ensure this is a public key
pubkey = PublicKey(pubkey)
if compressed:
pubkey = pubkey.compressed()
else:
pubkey = pubkey.uncompressed()
""" Derive address using ``RIPEMD160(SHA256(x))`` """
addressbin = ripemd160(hexlify(hashlib.sha256(unhexlify(pubkey)).digest()))
return cls(hexlify(addressbin).decode("ascii")) |
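The docstring's formula, RIPEMD160(SHA256(x)), can be reproduced with hashlib alone, assuming the interpreter's OpenSSL build exposes ripemd160 (most do). The public key bytes below are a made-up placeholder, not a real key, and this only illustrates the hash chain, not the surrounding PublicKey handling.

import hashlib
from binascii import hexlify, unhexlify

pubkey_hex = "02" + "11" * 32     # placeholder compressed-key-sized hex, not a real key
sha = hashlib.sha256(unhexlify(pubkey_hex)).digest()
ripe = hashlib.new("ripemd160", sha).digest()
address_hex = hexlify(ripe).decode("ascii")
print(len(ripe), address_hex)     # -> 20 followed by the 40-hex-character digest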
def add(self, variant, arch, image):
"""
Assign an :class:`.Image` object to variant and arch.
:param variant: compose variant UID
:type variant: str
:param arch: compose architecture
:type arch: str
:param image: image
:type image: :class:`.Image`
"""
if arch not in productmd.common.RPM_ARCHES:
raise ValueError("Arch not found in RPM_ARCHES: %s" % arch)
if arch in ["src", "nosrc"]:
raise ValueError("Source arch is not allowed. Map source files under binary arches.")
if self.header.version_tuple >= (1, 1):
# disallow adding a different image with same 'unique'
# attributes. can't do this pre-1.1 as we couldn't truly
# identify images before subvariant
for checkvar in self.images:
for checkarch in self.images[checkvar]:
for curimg in self.images[checkvar][checkarch]:
if identify_image(curimg) == identify_image(image) and curimg.checksums != image.checksums:
raise ValueError("Image {0} shares all UNIQUE_IMAGE_ATTRIBUTES with "
"image {1}! This is forbidden.".format(image, curimg))
self.images.setdefault(variant, {}).setdefault(arch, set()).add(image) | Assign an :class:`.Image` object to variant and arch.
:param variant: compose variant UID
:type variant: str
:param arch: compose architecture
:type arch: str
:param image: image
:type image: :class:`.Image` | Below is the the instruction that describes the task:
### Input:
Assign an :class:`.Image` object to variant and arch.
:param variant: compose variant UID
:type variant: str
:param arch: compose architecture
:type arch: str
:param image: image
:type image: :class:`.Image`
### Response:
def add(self, variant, arch, image):
"""
Assign an :class:`.Image` object to variant and arch.
:param variant: compose variant UID
:type variant: str
:param arch: compose architecture
:type arch: str
:param image: image
:type image: :class:`.Image`
"""
if arch not in productmd.common.RPM_ARCHES:
raise ValueError("Arch not found in RPM_ARCHES: %s" % arch)
if arch in ["src", "nosrc"]:
raise ValueError("Source arch is not allowed. Map source files under binary arches.")
if self.header.version_tuple >= (1, 1):
# disallow adding a different image with same 'unique'
# attributes. can't do this pre-1.1 as we couldn't truly
# identify images before subvariant
for checkvar in self.images:
for checkarch in self.images[checkvar]:
for curimg in self.images[checkvar][checkarch]:
if identify_image(curimg) == identify_image(image) and curimg.checksums != image.checksums:
raise ValueError("Image {0} shares all UNIQUE_IMAGE_ATTRIBUTES with "
"image {1}! This is forbidden.".format(image, curimg))
self.images.setdefault(variant, {}).setdefault(arch, set()).add(image) |
def pipe_rename(context=None, _INPUT=None, conf=None, **kwargs):
"""An operator that renames or copies fields in the input source.
Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
conf : {
'RULE': [
{
'op': {'value': 'rename or copy'},
'field': {'value': 'old field'},
'newval': {'value': 'new field'}
}
]
}
kwargs : other inputs, e.g., to feed terminals for rule values
Returns
-------
_OUTPUT : generator of items
"""
splits = get_splits(_INPUT, conf['RULE'], **cdicts(opts, kwargs))
_OUTPUT = parse_results(splits, **kwargs)
return _OUTPUT | An operator that renames or copies fields in the input source.
Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
conf : {
'RULE': [
{
'op': {'value': 'rename or copy'},
'field': {'value': 'old field'},
'newval': {'value': 'new field'}
}
]
}
kwargs : other inputs, e.g., to feed terminals for rule values
Returns
-------
_OUTPUT : generator of items | Below is the the instruction that describes the task:
### Input:
An operator that renames or copies fields in the input source.
Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
conf : {
'RULE': [
{
'op': {'value': 'rename or copy'},
'field': {'value': 'old field'},
'newval': {'value': 'new field'}
}
]
}
kwargs : other inputs, e.g., to feed terminals for rule values
Returns
-------
_OUTPUT : generator of items
### Response:
def pipe_rename(context=None, _INPUT=None, conf=None, **kwargs):
"""An operator that renames or copies fields in the input source.
Not loopable.
Parameters
----------
context : pipe2py.Context object
_INPUT : pipe2py.modules pipe like object (iterable of items)
conf : {
'RULE': [
{
'op': {'value': 'rename or copy'},
'field': {'value': 'old field'},
'newval': {'value': 'new field'}
}
]
}
kwargs : other inputs, e.g., to feed terminals for rule values
Returns
-------
_OUTPUT : generator of items
"""
splits = get_splits(_INPUT, conf['RULE'], **cdicts(opts, kwargs))
_OUTPUT = parse_results(splits, **kwargs)
return _OUTPUT |
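The heavy lifting above sits in get_splits and parse_results, which are not shown, so the sketch below only illustrates the rule semantics implied by the conf example: a 'copy' rule duplicates a field under a new name and a 'rename' rule also drops the old one. The item and rule values are invented.

rules = [
    {"op": "rename", "field": "title", "newval": "headline"},
    {"op": "copy", "field": "link", "newval": "url"},
]
item = {"title": "Hello", "link": "http://example.com"}
for rule in rules:
    item[rule["newval"]] = item.get(rule["field"])
    if rule["op"] == "rename":
        item.pop(rule["field"], None)
print(item)   # -> {'link': 'http://example.com', 'headline': 'Hello', 'url': 'http://example.com'}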
def dom_lt(graph):
"""Dominator algorithm from Lengauer-Tarjan"""
def _dfs(v, n):
semi[v] = n = n + 1
vertex[n] = label[v] = v
ancestor[v] = 0
for w in graph.all_sucs(v):
if not semi[w]:
parent[w] = v
n = _dfs(w, n)
pred[w].add(v)
return n
def _compress(v):
u = ancestor[v]
if ancestor[u]:
_compress(u)
if semi[label[u]] < semi[label[v]]:
label[v] = label[u]
ancestor[v] = ancestor[u]
def _eval(v):
if ancestor[v]:
_compress(v)
return label[v]
return v
def _link(v, w):
ancestor[w] = v
parent, ancestor, vertex = {}, {}, {}
label, dom = {}, {}
pred, bucket = defaultdict(set), defaultdict(set)
# Step 1:
semi = {v: 0 for v in graph.nodes}
n = _dfs(graph.entry, 0)
for i in range(n, 1, -1):
w = vertex[i]
# Step 2:
for v in pred[w]:
u = _eval(v)
y = semi[w] = min(semi[w], semi[u])
bucket[vertex[y]].add(w)
pw = parent[w]
_link(pw, w)
# Step 3:
bpw = bucket[pw]
while bpw:
v = bpw.pop()
u = _eval(v)
dom[v] = u if semi[u] < semi[v] else pw
# Step 4:
for i in range(2, n + 1):
w = vertex[i]
dw = dom[w]
if dw != vertex[semi[w]]:
dom[w] = dom[dw]
dom[graph.entry] = None
return dom | Dominator algorithm from Lengauer-Tarjan | Below is the the instruction that describes the task:
### Input:
Dominator algorithm from Lengauer-Tarjan
### Response:
def dom_lt(graph):
"""Dominator algorithm from Lengauer-Tarjan"""
def _dfs(v, n):
semi[v] = n = n + 1
vertex[n] = label[v] = v
ancestor[v] = 0
for w in graph.all_sucs(v):
if not semi[w]:
parent[w] = v
n = _dfs(w, n)
pred[w].add(v)
return n
def _compress(v):
u = ancestor[v]
if ancestor[u]:
_compress(u)
if semi[label[u]] < semi[label[v]]:
label[v] = label[u]
ancestor[v] = ancestor[u]
def _eval(v):
if ancestor[v]:
_compress(v)
return label[v]
return v
def _link(v, w):
ancestor[w] = v
parent, ancestor, vertex = {}, {}, {}
label, dom = {}, {}
pred, bucket = defaultdict(set), defaultdict(set)
# Step 1:
semi = {v: 0 for v in graph.nodes}
n = _dfs(graph.entry, 0)
for i in range(n, 1, -1):
w = vertex[i]
# Step 2:
for v in pred[w]:
u = _eval(v)
y = semi[w] = min(semi[w], semi[u])
bucket[vertex[y]].add(w)
pw = parent[w]
_link(pw, w)
# Step 3:
bpw = bucket[pw]
while bpw:
v = bpw.pop()
u = _eval(v)
dom[v] = u if semi[u] < semi[v] else pw
# Step 4:
for i in range(2, n + 1):
w = vertex[i]
dw = dom[w]
if dw != vertex[semi[w]]:
dom[w] = dom[dw]
dom[graph.entry] = None
return dom |
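A small driver for the function above on a diamond-shaped graph. It assumes dom_lt and its defaultdict import are in scope; since the graph object only needs .nodes, .entry and .all_sucs(), a SimpleNamespace stub is enough here.

from types import SimpleNamespace

succs = {1: [2, 3], 2: [4], 3: [4], 4: []}
graph = SimpleNamespace(
    nodes=[1, 2, 3, 4],
    entry=1,
    all_sucs=lambda v: succs[v],
)
print(dom_lt(graph))
# expected immediate dominators: {1: None, 2: 1, 3: 1, 4: 1}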
def describe(name, tags=None, region=None, key=None, keyid=None,
profile=None):
'''
Return RDS instance details.
CLI example::
salt myminion boto_rds.describe myrds
'''
res = __salt__['boto_rds.exists'](name, tags, region, key, keyid,
profile)
if not res.get('exists'):
return {'exists': bool(res), 'message':
'RDS instance {0} does not exist.'.format(name)}
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not conn:
return {'results': bool(conn)}
rds = conn.describe_db_instances(DBInstanceIdentifier=name)
rds = [
i for i in rds.get('DBInstances', [])
if i.get('DBInstanceIdentifier') == name
].pop(0)
if rds:
keys = ('DBInstanceIdentifier', 'DBInstanceClass', 'Engine',
'DBInstanceStatus', 'DBName', 'AllocatedStorage',
'PreferredBackupWindow', 'BackupRetentionPeriod',
'AvailabilityZone', 'PreferredMaintenanceWindow',
'LatestRestorableTime', 'EngineVersion',
'AutoMinorVersionUpgrade', 'LicenseModel',
'Iops', 'CharacterSetName', 'PubliclyAccessible',
'StorageType', 'TdeCredentialArn', 'DBInstancePort',
'DBClusterIdentifier', 'StorageEncrypted', 'KmsKeyId',
'DbiResourceId', 'CACertificateIdentifier',
'CopyTagsToSnapshot', 'MonitoringInterval',
'MonitoringRoleArn', 'PromotionTier',
'DomainMemberships')
return {'rds': dict([(k, rds.get(k)) for k in keys])}
else:
return {'rds': None}
except ClientError as e:
return {'error': __utils__['boto3.get_error'](e)}
except IndexError:
return {'rds': None} | Return RDS instance details.
CLI example::
salt myminion boto_rds.describe myrds | Below is the the instruction that describes the task:
### Input:
Return RDS instance details.
CLI example::
salt myminion boto_rds.describe myrds
### Response:
def describe(name, tags=None, region=None, key=None, keyid=None,
profile=None):
'''
Return RDS instance details.
CLI example::
salt myminion boto_rds.describe myrds
'''
res = __salt__['boto_rds.exists'](name, tags, region, key, keyid,
profile)
if not res.get('exists'):
return {'exists': bool(res), 'message':
'RDS instance {0} does not exist.'.format(name)}
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not conn:
return {'results': bool(conn)}
rds = conn.describe_db_instances(DBInstanceIdentifier=name)
rds = [
i for i in rds.get('DBInstances', [])
if i.get('DBInstanceIdentifier') == name
].pop(0)
if rds:
keys = ('DBInstanceIdentifier', 'DBInstanceClass', 'Engine',
'DBInstanceStatus', 'DBName', 'AllocatedStorage',
'PreferredBackupWindow', 'BackupRetentionPeriod',
'AvailabilityZone', 'PreferredMaintenanceWindow',
'LatestRestorableTime', 'EngineVersion',
'AutoMinorVersionUpgrade', 'LicenseModel',
'Iops', 'CharacterSetName', 'PubliclyAccessible',
'StorageType', 'TdeCredentialArn', 'DBInstancePort',
'DBClusterIdentifier', 'StorageEncrypted', 'KmsKeyId',
'DbiResourceId', 'CACertificateIdentifier',
'CopyTagsToSnapshot', 'MonitoringInterval',
'MonitoringRoleArn', 'PromotionTier',
'DomainMemberships')
return {'rds': dict([(k, rds.get(k)) for k in keys])}
else:
return {'rds': None}
except ClientError as e:
return {'error': __utils__['boto3.get_error'](e)}
except IndexError:
return {'rds': None} |
def inference_q(self, next_action_arr):
'''
Inference Q-Value.
Args:
next_action_arr: `np.ndarray` of action.
Returns:
`np.ndarray` of Q-Values.
'''
q_arr = next_action_arr.reshape((next_action_arr.shape[0], -1))
self.__q_arr_list.append(q_arr)
while len(self.__q_arr_list) > self.__seq_len:
self.__q_arr_list = self.__q_arr_list[1:]
while len(self.__q_arr_list) < self.__seq_len:
self.__q_arr_list.append(self.__q_arr_list[-1])
q_arr = np.array(self.__q_arr_list)
q_arr = q_arr.transpose((1, 0, 2))
q_arr = self.__lstm_model.inference(q_arr)
return q_arr[:, -1].reshape((q_arr.shape[0], 1)) | Inference Q-Value.
Args:
next_action_arr: `np.ndarray` of action.
Returns:
`np.ndarray` of Q-Values. | Below is the the instruction that describes the task:
### Input:
Inference Q-Value.
Args:
next_action_arr: `np.ndarray` of action.
Returns:
`np.ndarray` of Q-Values.
### Response:
def inference_q(self, next_action_arr):
'''
Inference Q-Value.
Args:
next_action_arr: `np.ndarray` of action.
Returns:
`np.ndarray` of Q-Values.
'''
q_arr = next_action_arr.reshape((next_action_arr.shape[0], -1))
self.__q_arr_list.append(q_arr)
while len(self.__q_arr_list) > self.__seq_len:
self.__q_arr_list = self.__q_arr_list[1:]
while len(self.__q_arr_list) < self.__seq_len:
self.__q_arr_list.append(self.__q_arr_list[-1])
q_arr = np.array(self.__q_arr_list)
q_arr = q_arr.transpose((1, 0, 2))
q_arr = self.__lstm_model.inference(q_arr)
return q_arr[:, -1].reshape((q_arr.shape[0], 1)) |
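The bookkeeping above keeps a fixed-length window of the most recent Q arrays, pads it by repeating the newest entry, and reorders the axes to (batch, time, features) before the LSTM call. A NumPy-only sketch of just that windowing, with an invented batch size and sequence length:

import numpy as np

seq_len = 4
window = []                                    # analogue of self.__q_arr_list
for step in range(2):                          # only two observations so far
    window.append(np.full((3, 5), step, dtype=float))   # batch of 3, 5 features each

while len(window) > seq_len:                   # trim old entries
    window = window[1:]
while len(window) < seq_len:                   # pad by repeating the newest entry
    window.append(window[-1])

stacked = np.array(window)                     # shape (time, batch, features)
stacked = stacked.transpose((1, 0, 2))         # shape (batch, time, features)
print(stacked.shape)                           # -> (3, 4, 5)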
def format(args):
"""
%prog format oldagpfile newagpfile
Reformat AGP file. --switchcomponent will replace the component ids in the AGP file.
"""
from jcvi.formats.base import DictFile
p = OptionParser(format.__doc__)
p.add_option("--switchcomponent",
help="Switch component id based on")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
oldagpfile, newagpfile = args
switchcomponent = opts.switchcomponent
if switchcomponent:
switchcomponent = DictFile(switchcomponent)
agp = AGP(oldagpfile)
fw = open(newagpfile, "w")
nconverts = 0
for i, a in enumerate(agp):
if not a.is_gap and a.component_id in switchcomponent:
oldid = a.component_id
newid = switchcomponent[a.component_id]
a.component_id = newid
logging.debug("Convert {0} to {1} on line {2}".\
format(oldid, newid, i+1))
nconverts += 1
print(a, file=fw)
logging.debug("Total converted records: {0}".format(nconverts)) | %prog format oldagpfile newagpfile
Reformat AGP file. --switchcomponent will replace the component ids in the AGP file.
### Input:
%prog format oldagpfile newagpfile
Reformat AGP file. --switchcomponent will replace the component ids in the AGP file.
### Response:
def format(args):
"""
%prog format oldagpfile newagpfile
Reformat AGP file. --switchcomponent will replace the component ids in the AGP file.
"""
from jcvi.formats.base import DictFile
p = OptionParser(format.__doc__)
p.add_option("--switchcomponent",
help="Switch component id based on")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
oldagpfile, newagpfile = args
switchcomponent = opts.switchcomponent
if switchcomponent:
switchcomponent = DictFile(switchcomponent)
agp = AGP(oldagpfile)
fw = open(newagpfile, "w")
nconverts = 0
for i, a in enumerate(agp):
if not a.is_gap and a.component_id in switchcomponent:
oldid = a.component_id
newid = switchcomponent[a.component_id]
a.component_id = newid
logging.debug("Convert {0} to {1} on line {2}".\
format(oldid, newid, i+1))
nconverts += 1
print(a, file=fw)
logging.debug("Total converted records: {0}".format(nconverts)) |
def log(args, number=None, oneline=False, quiet=False):
"""Run a "git log ..." command, and return stdout
args is anything which can be added after a normal "git log ..."
it can be blank
number, if true-ish, will be added as a "-n" option
oneline, if true-ish, will add the "--oneline" option
"""
options = ' '.join([
number and str('-n %s' % number) or '',
oneline and '--oneline' or ''
])
try:
return run('log %s %s' % (options, args), quiet=quiet)
except UnknownRevision:
return '' | Run a "git log ..." command, and return stdout
args is anything which can be added after a normal "git log ..."
it can be blank
number, if true-ish, will be added as a "-n" option
oneline, if true-ish, will add the "--oneline" option | Below is the the instruction that describes the task:
### Input:
Run a "git log ..." command, and return stdout
args is anything which can be added after a normal "git log ..."
it can be blank
number, if true-ish, will be added as a "-n" option
oneline, if true-ish, will add the "--oneline" option
### Response:
def log(args, number=None, oneline=False, quiet=False):
"""Run a "git log ..." command, and return stdout
args is anything which can be added after a normal "git log ..."
it can be blank
number, if true-ish, will be added as a "-n" option
oneline, if true-ish, will add the "--oneline" option
"""
options = ' '.join([
number and str('-n %s' % number) or '',
oneline and '--oneline' or ''
])
try:
return run('log %s %s' % (options, args), quiet=quiet)
except UnknownRevision:
return '' |
def y_subset(y, query=None, aux=None, subset=None, dropna=False, outcome='true',
k=None, p=None, ascending=False, score='score', p_of='notnull'):
"""
Subset a model "y" dataframe
Args:
query: operates on y, or aux if present
subset: takes a dataframe or index thereof and subsets to that
dropna: means drop missing outcomes
return: top k (count) or p (proportion) if specified
p_of: specifies what the proportion is relative to
'notnull' means proportion is relative to labeled count
'true' means proportion is relative to positive count
'all' means proportion is relative to total count
"""
if query is not None:
if aux is None:
y = y.query(query)
else:
s = aux.ix[y.index]
if len(s) != len(y):
logging.warning('y not a subset of aux')
y = y.ix[s.query(query).index]
if subset is not None:
if hasattr(subset, 'index'):
subset = subset.index
y = y.ix[y.index.intersection(subset)]
if dropna:
y = y.dropna(subset=[outcome])
if k is not None and p is not None:
raise ValueError("Cannot specify both k and p")
elif k is not None:
k = k
elif p is not None:
if p_of == 'notnull':
k = int(p*y[outcome].notnull().sum())
elif p_of == 'true':
k = int(p*y[outcome].sum())
elif p_of == 'all':
k = int(p*len(y))
else:
raise ValueError('Invalid value for p_of: %s' % p_of)
else:
k = None
if k is not None:
y = y.sort_values(score, ascending=ascending).head(k)
return y | Subset a model "y" dataframe
Args:
query: operates on y, or aux if present
subset: takes a dataframe or index thereof and subsets to that
dropna: means drop missing outcomes
return: top k (count) or p (proportion) if specified
p_of: specifies what the proportion is relative to
'notnull' means proportion is relative to labeled count
'true' means proportion is relative to positive count
'all' means proportion is relative to total count | Below is the the instruction that describes the task:
### Input:
Subset a model "y" dataframe
Args:
query: operates on y, or aux if present
subset: takes a dataframe or index thereof and subsets to that
dropna: means drop missing outcomes
return: top k (count) or p (proportion) if specified
p_of: specifies what the proportion is relative to
'notnull' means proportion is relative to labeled count
'true' means proportion is relative to positive count
'all' means proportion is relative to total count
### Response:
def y_subset(y, query=None, aux=None, subset=None, dropna=False, outcome='true',
k=None, p=None, ascending=False, score='score', p_of='notnull'):
"""
Subset a model "y" dataframe
Args:
query: operates on y, or aux if present
subset: takes a dataframe or index thereof and subsets to that
dropna: means drop missing outcomes
return: top k (count) or p (proportion) if specified
p_of: specifies what the proportion is relative to
'notnull' means proportion is relative to labeled count
'true' means proportion is relative to positive count
'all' means proportion is relative to total count
"""
if query is not None:
if aux is None:
y = y.query(query)
else:
s = aux.ix[y.index]
if len(s) != len(y):
logging.warning('y not a subset of aux')
y = y.ix[s.query(query).index]
if subset is not None:
if hasattr(subset, 'index'):
subset = subset.index
y = y.ix[y.index.intersection(subset)]
if dropna:
y = y.dropna(subset=[outcome])
if k is not None and p is not None:
raise ValueError("Cannot specify both k and p")
elif k is not None:
k = k
elif p is not None:
if p_of == 'notnull':
k = int(p*y[outcome].notnull().sum())
elif p_of == 'true':
k = int(p*y[outcome].sum())
elif p_of == 'all':
k = int(p*len(y))
else:
raise ValueError('Invalid value for p_of: %s' % p_of)
else:
k = None
if k is not None:
y = y.sort_values(score, ascending=ascending).head(k)
return y |
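The most load-bearing branch above turns a proportion p into a count k relative to the labeled rows and then keeps the top-scoring rows. A pandas sketch of that branch on a toy frame; the column names follow the defaults above ('true' for the outcome, 'score' for the ranking):

import numpy as np
import pandas as pd

y = pd.DataFrame({
    "true":  [1, 0, np.nan, 1, 0, np.nan],
    "score": [0.9, 0.2, 0.8, 0.6, 0.4, 0.1],
})

p = 0.5                                    # take the top 50% of the labeled rows
k = int(p * y["true"].notnull().sum())     # 4 labeled rows, so k = 2
top = y.sort_values("score", ascending=False).head(k)
print(top)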
def toggle_settings(
toolbar=False, nbname=False, hideprompt=False, kernellogo=False):
"""Toggle main notebook toolbar (e.g., buttons), filename,
and kernel logo."""
toggle = ''
if toolbar:
toggle += 'div#maintoolbar {margin-left: 8px !important;}\n'
toggle += '.toolbar.container {width: 100% !important;}\n'
else:
toggle += 'div#maintoolbar {display: none !important;}\n'
if nbname:
toggle += ('span.save_widget span.filename {margin-left: 8px; height: initial;'
'font-size: 100%; color: @nb-name-fg; background-color:'
'@cc-input-bg;}\n')
toggle += ('span.save_widget span.filename:hover {color:'
'@nb-name-hover; background-color: @cc-input-bg;}\n')
toggle += ('#menubar {padding-top: 4px; background-color:'
'@notebook-bg;}\n')
else:
toggle += '#header-container {display: none !important;}\n'
if hideprompt:
toggle += 'div.prompt.input_prompt {display: none !important;}\n'
toggle += 'div.prompt.output_prompt {width: 5ex !important;}\n'
toggle += 'div.out_prompt_overlay.prompt:hover {width: 5ex !important; min-width: 5ex !important;}\n'
toggle += (
'.CodeMirror-gutters, .cm-s-ipython .CodeMirror-gutters'
'{ position: absolute; left: 0; top: 0; z-index: 3; width: 2em; '
'display: inline-block !important; }\n')
toggle += ('div.cell.code_cell .input { border-left: 5px solid @cm-gutters !important; border-bottom-left-radius: 5px; border-top-left-radius: 5px; }\n')
if kernellogo:
toggle += '@kernel-logo-display: block;'
else:
toggle += '@kernel-logo-display: none;'
return toggle | Toggle main notebook toolbar (e.g., buttons), filename,
and kernel logo. | Below is the the instruction that describes the task:
### Input:
Toggle main notebook toolbar (e.g., buttons), filename,
and kernel logo.
### Response:
def toggle_settings(
toolbar=False, nbname=False, hideprompt=False, kernellogo=False):
"""Toggle main notebook toolbar (e.g., buttons), filename,
and kernel logo."""
toggle = ''
if toolbar:
toggle += 'div#maintoolbar {margin-left: 8px !important;}\n'
toggle += '.toolbar.container {width: 100% !important;}\n'
else:
toggle += 'div#maintoolbar {display: none !important;}\n'
if nbname:
toggle += ('span.save_widget span.filename {margin-left: 8px; height: initial;'
'font-size: 100%; color: @nb-name-fg; background-color:'
'@cc-input-bg;}\n')
toggle += ('span.save_widget span.filename:hover {color:'
'@nb-name-hover; background-color: @cc-input-bg;}\n')
toggle += ('#menubar {padding-top: 4px; background-color:'
'@notebook-bg;}\n')
else:
toggle += '#header-container {display: none !important;}\n'
if hideprompt:
toggle += 'div.prompt.input_prompt {display: none !important;}\n'
toggle += 'div.prompt.output_prompt {width: 5ex !important;}\n'
toggle += 'div.out_prompt_overlay.prompt:hover {width: 5ex !important; min-width: 5ex !important;}\n'
toggle += (
'.CodeMirror-gutters, .cm-s-ipython .CodeMirror-gutters'
'{ position: absolute; left: 0; top: 0; z-index: 3; width: 2em; '
'display: inline-block !important; }\n')
toggle += ('div.cell.code_cell .input { border-left: 5px solid @cm-gutters !important; border-bottom-left-radius: 5px; border-top-left-radius: 5px; }\n')
if kernellogo:
toggle += '@kernel-logo-display: block;'
else:
toggle += '@kernel-logo-display: none;'
return toggle |
def setFontFamily(self, family):
"""
Sets the current font family to the inputed family.
:param family | <str>
"""
self.blockSignals(True)
self.editor().setFontFamily(family)
self.blockSignals(False) | Sets the current font family to the inputed family.
:param family | <str> | Below is the the instruction that describes the task:
### Input:
Sets the current font family to the inputed family.
:param family | <str>
### Response:
def setFontFamily(self, family):
"""
Sets the current font family to the inputed family.
:param family | <str>
"""
self.blockSignals(True)
self.editor().setFontFamily(family)
self.blockSignals(False) |
def wsgi(self, environ, start_response):
""" The bottle WSGI-interface. """
try:
environ['bottle.app'] = self
request.bind(environ)
response.bind()
out = self._cast(self._handle(environ), request, response)
# rfc2616 section 4.3
if response._status_code in (100, 101, 204, 304)\
or request.method == 'HEAD':
if hasattr(out, 'close'): out.close()
out = []
start_response(response._status_line, list(response.iter_headers()))
return out
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception, e:
if not self.catchall: raise
err = '<h1>Critical error while processing request: %s</h1>' \
% html_escape(environ.get('PATH_INFO', '/'))
if DEBUG:
err += '<h2>Error:</h2>\n<pre>\n%s\n</pre>\n' \
'<h2>Traceback:</h2>\n<pre>\n%s\n</pre>\n' \
% (html_escape(repr(e)), html_escape(format_exc(10)))
environ['wsgi.errors'].write(err)
headers = [('Content-Type', 'text/html; charset=UTF-8')]
start_response('500 INTERNAL SERVER ERROR', headers)
return [tob(err)] | The bottle WSGI-interface. | Below is the the instruction that describes the task:
### Input:
The bottle WSGI-interface.
### Response:
def wsgi(self, environ, start_response):
""" The bottle WSGI-interface. """
try:
environ['bottle.app'] = self
request.bind(environ)
response.bind()
out = self._cast(self._handle(environ), request, response)
# rfc2616 section 4.3
if response._status_code in (100, 101, 204, 304)\
or request.method == 'HEAD':
if hasattr(out, 'close'): out.close()
out = []
start_response(response._status_line, list(response.iter_headers()))
return out
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception, e:
if not self.catchall: raise
err = '<h1>Critical error while processing request: %s</h1>' \
% html_escape(environ.get('PATH_INFO', '/'))
if DEBUG:
err += '<h2>Error:</h2>\n<pre>\n%s\n</pre>\n' \
'<h2>Traceback:</h2>\n<pre>\n%s\n</pre>\n' \
% (html_escape(repr(e)), html_escape(format_exc(10)))
environ['wsgi.errors'].write(err)
headers = [('Content-Type', 'text/html; charset=UTF-8')]
start_response('500 INTERNAL SERVER ERROR', headers)
return [tob(err)] |
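The method above is tied to Bottle's internals and Python 2 syntax, but the WSGI contract it implements is easy to exercise directly: an application is a callable taking (environ, start_response) and returning an iterable of byte strings. A minimal framework-free sketch:

def tiny_app(environ, start_response):
    body = ("you asked for " + environ.get("PATH_INFO", "/")).encode("utf-8")
    start_response("200 OK", [("Content-Type", "text/plain; charset=UTF-8")])
    return [body]

# drive it by hand with a fake environ, the way a real server would
collected = {}
def fake_start_response(status, headers):
    collected["status"], collected["headers"] = status, headers

out = tiny_app({"PATH_INFO": "/hello"}, fake_start_response)
print(collected["status"], b"".join(out))   # -> 200 OK b'you asked for /hello'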
def init_account(self):
"""Setup a new GitHub account."""
ghuser = self.api.me()
# Setup local access tokens to be used by the webhooks
hook_token = ProviderToken.create_personal(
'github-webhook',
self.user_id,
scopes=['webhooks:event'],
is_internal=True,
)
# Initial structure of extra data
self.account.extra_data = dict(
id=ghuser.id,
login=ghuser.login,
name=ghuser.name,
tokens=dict(
webhook=hook_token.id,
),
repos=dict(),
last_sync=iso_utcnow(),
)
db.session.add(self.account)
# Sync data from GitHub, but don't check repository hooks yet.
self.sync(hooks=False) | Setup a new GitHub account. | Below is the the instruction that describes the task:
### Input:
Setup a new GitHub account.
### Response:
def init_account(self):
"""Setup a new GitHub account."""
ghuser = self.api.me()
# Setup local access tokens to be used by the webhooks
hook_token = ProviderToken.create_personal(
'github-webhook',
self.user_id,
scopes=['webhooks:event'],
is_internal=True,
)
# Initial structure of extra data
self.account.extra_data = dict(
id=ghuser.id,
login=ghuser.login,
name=ghuser.name,
tokens=dict(
webhook=hook_token.id,
),
repos=dict(),
last_sync=iso_utcnow(),
)
db.session.add(self.account)
# Sync data from GitHub, but don't check repository hooks yet.
self.sync(hooks=False) |
def translate(srcCol, matching, replace):
"""A function translate any character in the `srcCol` by a character in `matching`.
The characters in `replace` is corresponding to the characters in `matching`.
The translate will happen when any character in the string matching with the character
in the `matching`.
>>> spark.createDataFrame([('translate',)], ['a']).select(translate('a', "rnlt", "123") \\
... .alias('r')).collect()
[Row(r=u'1a2s3ae')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.translate(_to_java_column(srcCol), matching, replace)) | A function translate any character in the `srcCol` by a character in `matching`.
The characters in `replace` is corresponding to the characters in `matching`.
The translate will happen when any character in the string matching with the character
in the `matching`.
>>> spark.createDataFrame([('translate',)], ['a']).select(translate('a', "rnlt", "123") \\
... .alias('r')).collect()
[Row(r=u'1a2s3ae')] | Below is the the instruction that describes the task:
### Input:
A function translate any character in the `srcCol` by a character in `matching`.
The characters in `replace` is corresponding to the characters in `matching`.
The translate will happen when any character in the string matching with the character
in the `matching`.
>>> spark.createDataFrame([('translate',)], ['a']).select(translate('a', "rnlt", "123") \\
... .alias('r')).collect()
[Row(r=u'1a2s3ae')]
### Response:
def translate(srcCol, matching, replace):
"""A function translate any character in the `srcCol` by a character in `matching`.
The characters in `replace` is corresponding to the characters in `matching`.
The translate will happen when any character in the string matching with the character
in the `matching`.
>>> spark.createDataFrame([('translate',)], ['a']).select(translate('a', "rnlt", "123") \\
... .alias('r')).collect()
[Row(r=u'1a2s3ae')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.translate(_to_java_column(srcCol), matching, replace)) |
def set_mesh(self,
mesh,
shift=None,
is_time_reversal=True,
is_mesh_symmetry=True,
is_eigenvectors=False,
is_gamma_center=False,
run_immediately=True):
"""Phonon calculations on sampling mesh grids
Parameters
----------
mesh: array_like
Mesh numbers along a, b, c axes.
dtype='intc'
shape=(3,)
shift: array_like, optional, default None (no shift)
Mesh shifts along a*, b*, c* axes with respect to neighboring grid
points from the original mesh (Monkhorst-Pack or Gamma center).
0.5 gives half grid shift. Normally 0 or 0.5 is given.
Otherwise q-points symmetry search is not performed.
dtype='double'
shape=(3, )
is_time_reversal: bool, optional, default True
Time reversal symmetry is considered in symmetry search. By this,
inversion symmetry is always included.
is_mesh_symmetry: bool, optional, default True
Wheather symmetry search is done or not.
is_eigenvectors: bool, optional, default False
Eigenvectors are stored by setting True.
is_gamma_center: bool, default False
Uniform mesh grids are generated centring at Gamma point but not
the Monkhorst-Pack scheme.
run_immediately: bool, default True
With True, phonon calculations are performed immediately, which is
usual usage.
"""
warnings.warn("Phonopy.set_mesh is deprecated. "
"Use Phonopy.run_mesh.", DeprecationWarning)
if self._group_velocity is None:
with_group_velocities = False
else:
with_group_velocities = True
if run_immediately:
self.run_mesh(mesh,
shift=shift,
is_time_reversal=is_time_reversal,
is_mesh_symmetry=is_mesh_symmetry,
with_eigenvectors=is_eigenvectors,
with_group_velocities=with_group_velocities,
is_gamma_center=is_gamma_center)
else:
self.init_mesh(mesh,
shift=shift,
is_time_reversal=is_time_reversal,
is_mesh_symmetry=is_mesh_symmetry,
with_eigenvectors=is_eigenvectors,
with_group_velocities=with_group_velocities,
is_gamma_center=is_gamma_center) | Phonon calculations on sampling mesh grids
Parameters
----------
mesh: array_like
Mesh numbers along a, b, c axes.
dtype='intc'
shape=(3,)
shift: array_like, optional, default None (no shift)
Mesh shifts along a*, b*, c* axes with respect to neighboring grid
points from the original mesh (Monkhorst-Pack or Gamma center).
0.5 gives half grid shift. Normally 0 or 0.5 is given.
Otherwise q-points symmetry search is not performed.
dtype='double'
shape=(3, )
is_time_reversal: bool, optional, default True
Time reversal symmetry is considered in symmetry search. By this,
inversion symmetry is always included.
is_mesh_symmetry: bool, optional, default True
Wheather symmetry search is done or not.
is_eigenvectors: bool, optional, default False
Eigenvectors are stored by setting True.
is_gamma_center: bool, default False
Uniform mesh grids are generated centring at Gamma point but not
the Monkhorst-Pack scheme.
run_immediately: bool, default True
With True, phonon calculations are performed immediately, which is
usual usage. | Below is the the instruction that describes the task:
### Input:
Phonon calculations on sampling mesh grids
Parameters
----------
mesh: array_like
Mesh numbers along a, b, c axes.
dtype='intc'
shape=(3,)
shift: array_like, optional, default None (no shift)
Mesh shifts along a*, b*, c* axes with respect to neighboring grid
points from the original mesh (Monkhorst-Pack or Gamma center).
0.5 gives half grid shift. Normally 0 or 0.5 is given.
Otherwise q-points symmetry search is not performed.
dtype='double'
shape=(3, )
is_time_reversal: bool, optional, default True
Time reversal symmetry is considered in symmetry search. By this,
inversion symmetry is always included.
is_mesh_symmetry: bool, optional, default True
Wheather symmetry search is done or not.
is_eigenvectors: bool, optional, default False
Eigenvectors are stored by setting True.
is_gamma_center: bool, default False
Uniform mesh grids are generated centring at Gamma point but not
the Monkhorst-Pack scheme.
run_immediately: bool, default True
With True, phonon calculations are performed immediately, which is
usual usage.
### Response:
def set_mesh(self,
mesh,
shift=None,
is_time_reversal=True,
is_mesh_symmetry=True,
is_eigenvectors=False,
is_gamma_center=False,
run_immediately=True):
"""Phonon calculations on sampling mesh grids
Parameters
----------
mesh: array_like
Mesh numbers along a, b, c axes.
dtype='intc'
shape=(3,)
shift: array_like, optional, default None (no shift)
Mesh shifts along a*, b*, c* axes with respect to neighboring grid
points from the original mesh (Monkhorst-Pack or Gamma center).
0.5 gives half grid shift. Normally 0 or 0.5 is given.
Otherwise q-points symmetry search is not performed.
dtype='double'
shape=(3, )
is_time_reversal: bool, optional, default True
Time reversal symmetry is considered in symmetry search. By this,
inversion symmetry is always included.
is_mesh_symmetry: bool, optional, default True
Wheather symmetry search is done or not.
is_eigenvectors: bool, optional, default False
Eigenvectors are stored by setting True.
is_gamma_center: bool, default False
Uniform mesh grids are generated centring at Gamma point but not
the Monkhorst-Pack scheme.
run_immediately: bool, default True
With True, phonon calculations are performed immediately, which is
usual usage.
"""
warnings.warn("Phonopy.set_mesh is deprecated. "
"Use Phonopy.run_mesh.", DeprecationWarning)
if self._group_velocity is None:
with_group_velocities = False
else:
with_group_velocities = True
if run_immediately:
self.run_mesh(mesh,
shift=shift,
is_time_reversal=is_time_reversal,
is_mesh_symmetry=is_mesh_symmetry,
with_eigenvectors=is_eigenvectors,
with_group_velocities=with_group_velocities,
is_gamma_center=is_gamma_center)
else:
self.init_mesh(mesh,
shift=shift,
is_time_reversal=is_time_reversal,
is_mesh_symmetry=is_mesh_symmetry,
with_eigenvectors=is_eigenvectors,
with_group_velocities=with_group_velocities,
is_gamma_center=is_gamma_center) |
def table_formatter(self, dataframe, inc_header=1, inc_index=1):
"""Return a table formatter for the dataframe. Saves the user the need to import this class"""
return TableFormatter(dataframe, inc_header=inc_header, inc_index=inc_index) | Return a table formatter for the dataframe. Saves the user the need to import this class | Below is the the instruction that describes the task:
### Input:
Return a table formatter for the dataframe. Saves the user the need to import this class
### Response:
def table_formatter(self, dataframe, inc_header=1, inc_index=1):
"""Return a table formatter for the dataframe. Saves the user the need to import this class"""
return TableFormatter(dataframe, inc_header=inc_header, inc_index=inc_index) |
def _prep_ssh(
self,
tgt,
fun,
arg=(),
timeout=None,
tgt_type='glob',
kwarg=None,
**kwargs):
'''
Prepare the arguments
'''
opts = copy.deepcopy(self.opts)
opts.update(kwargs)
if timeout:
opts['timeout'] = timeout
arg = salt.utils.args.condition_input(arg, kwarg)
opts['argv'] = [fun] + arg
opts['selected_target_option'] = tgt_type
opts['tgt'] = tgt
opts['arg'] = arg
return salt.client.ssh.SSH(opts) | Prepare the arguments | Below is the the instruction that describes the task:
### Input:
Prepare the arguments
### Response:
def _prep_ssh(
self,
tgt,
fun,
arg=(),
timeout=None,
tgt_type='glob',
kwarg=None,
**kwargs):
'''
Prepare the arguments
'''
opts = copy.deepcopy(self.opts)
opts.update(kwargs)
if timeout:
opts['timeout'] = timeout
arg = salt.utils.args.condition_input(arg, kwarg)
opts['argv'] = [fun] + arg
opts['selected_target_option'] = tgt_type
opts['tgt'] = tgt
opts['arg'] = arg
return salt.client.ssh.SSH(opts) |
def dump(self):
"""Print a formatted summary of the current solve state."""
from rez.utils.formatting import columnise
rows = []
for i, phase in enumerate(self.phase_stack):
rows.append((self._depth_label(i), phase.status, str(phase)))
print "status: %s (%s)" % (self.status.name, self.status.description)
print "initial request: %s" % str(self.request_list)
print
print "solve stack:"
print '\n'.join(columnise(rows))
if self.failed_phase_list:
rows = []
for i, phase in enumerate(self.failed_phase_list):
rows.append(("#%d" % i, phase.status, str(phase)))
print
print "previous failures:"
print '\n'.join(columnise(rows)) | Print a formatted summary of the current solve state. | Below is the the instruction that describes the task:
### Input:
Print a formatted summary of the current solve state.
### Response:
def dump(self):
"""Print a formatted summary of the current solve state."""
from rez.utils.formatting import columnise
rows = []
for i, phase in enumerate(self.phase_stack):
rows.append((self._depth_label(i), phase.status, str(phase)))
print "status: %s (%s)" % (self.status.name, self.status.description)
print "initial request: %s" % str(self.request_list)
print
print "solve stack:"
print '\n'.join(columnise(rows))
if self.failed_phase_list:
rows = []
for i, phase in enumerate(self.failed_phase_list):
rows.append(("#%d" % i, phase.status, str(phase)))
print
print "previous failures:"
print '\n'.join(columnise(rows)) |
def collation(self, collation):
"""Adds a :class:`~pymongo.collation.Collation` to this query.
This option is only supported on MongoDB 3.4 and above.
Raises :exc:`TypeError` if `collation` is not an instance of
:class:`~pymongo.collation.Collation` or a ``dict``. Raises
:exc:`~pymongo.errors.InvalidOperation` if this :class:`Cursor` has
already been used. Only the last collation applied to this cursor has
any effect.
:Parameters:
- `collation`: An instance of :class:`~pymongo.collation.Collation`.
"""
self.__check_okay_to_chain()
self.__collation = validate_collation_or_none(collation)
return self | Adds a :class:`~pymongo.collation.Collation` to this query.
This option is only supported on MongoDB 3.4 and above.
Raises :exc:`TypeError` if `collation` is not an instance of
:class:`~pymongo.collation.Collation` or a ``dict``. Raises
:exc:`~pymongo.errors.InvalidOperation` if this :class:`Cursor` has
already been used. Only the last collation applied to this cursor has
any effect.
:Parameters:
- `collation`: An instance of :class:`~pymongo.collation.Collation`. | Below is the the instruction that describes the task:
### Input:
Adds a :class:`~pymongo.collation.Collation` to this query.
This option is only supported on MongoDB 3.4 and above.
Raises :exc:`TypeError` if `collation` is not an instance of
:class:`~pymongo.collation.Collation` or a ``dict``. Raises
:exc:`~pymongo.errors.InvalidOperation` if this :class:`Cursor` has
already been used. Only the last collation applied to this cursor has
any effect.
:Parameters:
- `collation`: An instance of :class:`~pymongo.collation.Collation`.
### Response:
def collation(self, collation):
"""Adds a :class:`~pymongo.collation.Collation` to this query.
This option is only supported on MongoDB 3.4 and above.
Raises :exc:`TypeError` if `collation` is not an instance of
:class:`~pymongo.collation.Collation` or a ``dict``. Raises
:exc:`~pymongo.errors.InvalidOperation` if this :class:`Cursor` has
already been used. Only the last collation applied to this cursor has
any effect.
:Parameters:
- `collation`: An instance of :class:`~pymongo.collation.Collation`.
"""
self.__check_okay_to_chain()
self.__collation = validate_collation_or_none(collation)
return self |
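A hedged usage sketch for attaching a collation to a pymongo cursor; the database, collection, and field names are placeholders, and iterating the cursor assumes a reachable mongod running MongoDB 3.4+.

from pymongo import MongoClient
from pymongo.collation import Collation

coll = MongoClient()["shop"]["products"]
# strength=2 gives case-insensitive comparison; only the last collation applied to the cursor wins.
cursor = coll.find({}).sort("name").collation(Collation(locale="en_US", strength=2))
for doc in cursor:  # iteration executes the query, so it needs a live server
    print(doc["name"])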
def gsea(data, gene_sets, cls, outdir='GSEA_', min_size=15, max_size=500, permutation_num=1000,
weighted_score_type=1,permutation_type='gene_set', method='log2_ratio_of_classes',
ascending=False, processes=1, figsize=(6.5,6), format='pdf',
graph_num=20, no_plot=False, seed=None, verbose=False):
""" Run Gene Set Enrichment Analysis.
:param data: Gene expression data table, Pandas DataFrame, gct file.
:param gene_sets: Enrichr Library name or .gmt gene sets file or dict of gene sets. Same input with GSEA.
:param cls: A list or a .cls file format required for GSEA.
:param str outdir: Results output directory.
:param int permutation_num: Number of permutations for significance computation. Default: 1000.
:param str permutation_type: Permutation type, "phenotype" for phenotypes, "gene_set" for genes.
:param int min_size: Minimum allowed number of genes from gene set also the data set. Default: 15.
:param int max_size: Maximum allowed number of genes from gene set also the data set. Default: 500.
:param float weighted_score_type: Refer to :func:`algorithm.enrichment_score`. Default:1.
:param method: The method used to calculate a correlation or ranking. Default: 'log2_ratio_of_classes'.
Others methods are:
1. 'signal_to_noise'
You must have at least three samples for each phenotype to use this metric.
The larger the signal-to-noise ratio, the larger the differences of the means (scaled by the standard deviations);
that is, the more distinct the gene expression is in each phenotype and the more the gene acts as a “class marker.”
2. 't_test'
Uses the difference of means scaled by the standard deviation and number of samples.
Note: You must have at least three samples for each phenotype to use this metric.
The larger the tTest ratio, the more distinct the gene expression is in each phenotype
and the more the gene acts as a “class marker.”
3. 'ratio_of_classes' (also referred to as fold change).
Uses the ratio of class means to calculate fold change for natural scale data.
4. 'diff_of_classes'
Uses the difference of class means to calculate fold change for nature scale data
5. 'log2_ratio_of_classes'
Uses the log2 ratio of class means to calculate fold change for natural scale data.
This is the recommended statistic for calculating fold change for log scale data.
:param bool ascending: Sorting order of rankings. Default: False.
:param int processes: Number of Processes you are going to use. Default: 1.
:param list figsize: Matplotlib figsize, accept a tuple or list, e.g. [width,height]. Default: [6.5,6].
:param str format: Matplotlib figure format. Default: 'pdf'.
:param int graph_num: Plot graphs for top sets of each phenotype.
:param bool no_plot: If equals to True, no figure will be drawn. Default: False.
:param seed: Random seed. expect an integer. Default:None.
:param bool verbose: Bool, increase output verbosity, print out progress of your job, Default: False.
:return: Return a GSEA obj. All results store to a dictionary, obj.results,
where contains::
| {es: enrichment score,
| nes: normalized enrichment score,
| p: P-value,
| fdr: FDR,
| size: gene set size,
| matched_size: genes matched to the data,
| genes: gene names from the data set
| ledge_genes: leading edge genes}
"""
gs = GSEA(data, gene_sets, cls, outdir, min_size, max_size, permutation_num,
weighted_score_type, permutation_type, method, ascending, processes,
figsize, format, graph_num, no_plot, seed, verbose)
gs.run()
return gs | Run Gene Set Enrichment Analysis.
:param data: Gene expression data table, Pandas DataFrame, gct file.
:param gene_sets: Enrichr Library name or .gmt gene sets file or dict of gene sets. Same input with GSEA.
:param cls: A list or a .cls file format required for GSEA.
:param str outdir: Results output directory.
:param int permutation_num: Number of permutations for significance computation. Default: 1000.
:param str permutation_type: Permutation type, "phenotype" for phenotypes, "gene_set" for genes.
:param int min_size: Minimum allowed number of genes from gene set also the data set. Default: 15.
:param int max_size: Maximum allowed number of genes from gene set also the data set. Default: 500.
:param float weighted_score_type: Refer to :func:`algorithm.enrichment_score`. Default:1.
:param method: The method used to calculate a correlation or ranking. Default: 'log2_ratio_of_classes'.
Others methods are:
1. 'signal_to_noise'
You must have at least three samples for each phenotype to use this metric.
The larger the signal-to-noise ratio, the larger the differences of the means (scaled by the standard deviations);
that is, the more distinct the gene expression is in each phenotype and the more the gene acts as a “class marker.”
2. 't_test'
Uses the difference of means scaled by the standard deviation and number of samples.
Note: You must have at least three samples for each phenotype to use this metric.
The larger the tTest ratio, the more distinct the gene expression is in each phenotype
and the more the gene acts as a “class marker.”
3. 'ratio_of_classes' (also referred to as fold change).
Uses the ratio of class means to calculate fold change for natural scale data.
4. 'diff_of_classes'
Uses the difference of class means to calculate fold change for nature scale data
5. 'log2_ratio_of_classes'
Uses the log2 ratio of class means to calculate fold change for natural scale data.
This is the recommended statistic for calculating fold change for log scale data.
:param bool ascending: Sorting order of rankings. Default: False.
:param int processes: Number of Processes you are going to use. Default: 1.
:param list figsize: Matplotlib figsize, accept a tuple or list, e.g. [width,height]. Default: [6.5,6].
:param str format: Matplotlib figure format. Default: 'pdf'.
:param int graph_num: Plot graphs for top sets of each phenotype.
:param bool no_plot: If equals to True, no figure will be drawn. Default: False.
:param seed: Random seed. expect an integer. Default:None.
:param bool verbose: Bool, increase output verbosity, print out progress of your job, Default: False.
:return: Return a GSEA obj. All results store to a dictionary, obj.results,
where contains::
| {es: enrichment score,
| nes: normalized enrichment score,
| p: P-value,
| fdr: FDR,
| size: gene set size,
| matched_size: genes matched to the data,
| genes: gene names from the data set
| ledge_genes: leading edge genes} | Below is the the instruction that describes the task:
### Input:
Run Gene Set Enrichment Analysis.
:param data: Gene expression data table, Pandas DataFrame, gct file.
:param gene_sets: Enrichr Library name or .gmt gene sets file or dict of gene sets. Same input with GSEA.
:param cls: A list or a .cls file format required for GSEA.
:param str outdir: Results output directory.
:param int permutation_num: Number of permutations for significance computation. Default: 1000.
:param str permutation_type: Permutation type, "phenotype" for phenotypes, "gene_set" for genes.
:param int min_size: Minimum allowed number of genes from gene set also the data set. Default: 15.
:param int max_size: Maximum allowed number of genes from gene set also the data set. Default: 500.
:param float weighted_score_type: Refer to :func:`algorithm.enrichment_score`. Default:1.
:param method: The method used to calculate a correlation or ranking. Default: 'log2_ratio_of_classes'.
Others methods are:
1. 'signal_to_noise'
You must have at least three samples for each phenotype to use this metric.
The larger the signal-to-noise ratio, the larger the differences of the means (scaled by the standard deviations);
that is, the more distinct the gene expression is in each phenotype and the more the gene acts as a “class marker.”
2. 't_test'
Uses the difference of means scaled by the standard deviation and number of samples.
Note: You must have at least three samples for each phenotype to use this metric.
The larger the tTest ratio, the more distinct the gene expression is in each phenotype
and the more the gene acts as a “class marker.”
3. 'ratio_of_classes' (also referred to as fold change).
Uses the ratio of class means to calculate fold change for natural scale data.
4. 'diff_of_classes'
Uses the difference of class means to calculate fold change for nature scale data
5. 'log2_ratio_of_classes'
Uses the log2 ratio of class means to calculate fold change for natural scale data.
This is the recommended statistic for calculating fold change for log scale data.
:param bool ascending: Sorting order of rankings. Default: False.
:param int processes: Number of Processes you are going to use. Default: 1.
:param list figsize: Matplotlib figsize, accept a tuple or list, e.g. [width,height]. Default: [6.5,6].
:param str format: Matplotlib figure format. Default: 'pdf'.
:param int graph_num: Plot graphs for top sets of each phenotype.
:param bool no_plot: If equals to True, no figure will be drawn. Default: False.
:param seed: Random seed. expect an integer. Default:None.
:param bool verbose: Bool, increase output verbosity, print out progress of your job, Default: False.
:return: Return a GSEA obj. All results store to a dictionary, obj.results,
where contains::
| {es: enrichment score,
| nes: normalized enrichment score,
| p: P-value,
| fdr: FDR,
| size: gene set size,
| matched_size: genes matched to the data,
| genes: gene names from the data set
| ledge_genes: leading edge genes}
### Response:
def gsea(data, gene_sets, cls, outdir='GSEA_', min_size=15, max_size=500, permutation_num=1000,
weighted_score_type=1,permutation_type='gene_set', method='log2_ratio_of_classes',
ascending=False, processes=1, figsize=(6.5,6), format='pdf',
graph_num=20, no_plot=False, seed=None, verbose=False):
""" Run Gene Set Enrichment Analysis.
:param data: Gene expression data table, Pandas DataFrame, gct file.
:param gene_sets: Enrichr Library name or .gmt gene sets file or dict of gene sets. Same input with GSEA.
:param cls: A list or a .cls file format required for GSEA.
:param str outdir: Results output directory.
:param int permutation_num: Number of permutations for significance computation. Default: 1000.
:param str permutation_type: Permutation type, "phenotype" for phenotypes, "gene_set" for genes.
:param int min_size: Minimum allowed number of genes from gene set also the data set. Default: 15.
:param int max_size: Maximum allowed number of genes from gene set also the data set. Default: 500.
:param float weighted_score_type: Refer to :func:`algorithm.enrichment_score`. Default:1.
:param method: The method used to calculate a correlation or ranking. Default: 'log2_ratio_of_classes'.
Others methods are:
1. 'signal_to_noise'
You must have at least three samples for each phenotype to use this metric.
The larger the signal-to-noise ratio, the larger the differences of the means (scaled by the standard deviations);
that is, the more distinct the gene expression is in each phenotype and the more the gene acts as a “class marker.”
2. 't_test'
Uses the difference of means scaled by the standard deviation and number of samples.
Note: You must have at least three samples for each phenotype to use this metric.
The larger the tTest ratio, the more distinct the gene expression is in each phenotype
and the more the gene acts as a “class marker.”
3. 'ratio_of_classes' (also referred to as fold change).
Uses the ratio of class means to calculate fold change for natural scale data.
4. 'diff_of_classes'
Uses the difference of class means to calculate fold change for nature scale data
5. 'log2_ratio_of_classes'
Uses the log2 ratio of class means to calculate fold change for natural scale data.
This is the recommended statistic for calculating fold change for log scale data.
:param bool ascending: Sorting order of rankings. Default: False.
:param int processes: Number of Processes you are going to use. Default: 1.
:param list figsize: Matplotlib figsize, accept a tuple or list, e.g. [width,height]. Default: [6.5,6].
:param str format: Matplotlib figure format. Default: 'pdf'.
:param int graph_num: Plot graphs for top sets of each phenotype.
:param bool no_plot: If equals to True, no figure will be drawn. Default: False.
:param seed: Random seed. expect an integer. Default:None.
:param bool verbose: Bool, increase output verbosity, print out progress of your job, Default: False.
:return: Return a GSEA obj. All results store to a dictionary, obj.results,
where contains::
| {es: enrichment score,
| nes: normalized enrichment score,
| p: P-value,
| fdr: FDR,
| size: gene set size,
| matched_size: genes matched to the data,
| genes: gene names from the data set
| ledge_genes: leading edge genes}
"""
gs = GSEA(data, gene_sets, cls, outdir, min_size, max_size, permutation_num,
weighted_score_type, permutation_type, method, ascending, processes,
figsize, format, graph_num, no_plot, seed, verbose)
gs.run()
return gs |
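A call sketch of how this top-level gsea() helper is typically invoked from gseapy; the expression/phenotype file names and the Enrichr library name below are placeholders, not files referenced anywhere in this document.

import gseapy as gp

gs = gp.gsea(data="expression.gct",        # genes x samples expression table
             gene_sets="KEGG_2016",        # Enrichr library name or a .gmt file
             cls="phenotype.cls",          # two-class phenotype labels
             permutation_type="phenotype",
             permutation_num=1000,
             method="signal_to_noise",
             outdir="gsea_report",
             seed=7)
print(gs.res2d.head())                     # per-gene-set ES/NES/p-value/FDR summary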
def linear_gradient(start_hex, finish_hex, n=10):
"""
Interpolates the color gradient between to hex colors
"""
s = hex2rgb(start_hex)
f = hex2rgb(finish_hex)
gradient = [s]
for t in range(1, n):
curr_vector = [int(s[j] + (float(t)/(n-1))*(f[j]-s[j])) for j in range(3)]
gradient.append(curr_vector)
return [rgb2hex([c/255. for c in rgb]) for rgb in gradient] | Interpolates the color gradient between to hex colors | Below is the the instruction that describes the task:
### Input:
Interpolates the color gradient between to hex colors
### Response:
def linear_gradient(start_hex, finish_hex, n=10):
"""
Interpolates the color gradient between to hex colors
"""
s = hex2rgb(start_hex)
f = hex2rgb(finish_hex)
gradient = [s]
for t in range(1, n):
curr_vector = [int(s[j] + (float(t)/(n-1))*(f[j]-s[j])) for j in range(3)]
gradient.append(curr_vector)
return [rgb2hex([c/255. for c in rgb]) for rgb in gradient] |
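A self-contained sketch of the same interpolation. The hex2rgb and rgb2hex helpers are not shown in the entry above, so simplified stand-ins are assumed here.

def hex2rgb(h):
    # '#rrggbb' -> [r, g, b] with 0-255 integer channels (assumed helper)
    h = h.lstrip('#')
    return [int(h[i:i + 2], 16) for i in (0, 2, 4)]

def rgb2hex(rgb):
    # channels scaled to 0..1 -> '#rrggbb' (assumed helper, matching the call below)
    return '#%02x%02x%02x' % tuple(int(round(c * 255)) for c in rgb)

def linear_gradient(start_hex, finish_hex, n=10):
    s, f = hex2rgb(start_hex), hex2rgb(finish_hex)
    gradient = [s]
    for t in range(1, n):
        gradient.append([int(s[j] + (float(t) / (n - 1)) * (f[j] - s[j])) for j in range(3)])
    return [rgb2hex([c / 255. for c in rgb]) for rgb in gradient]

print(linear_gradient('#ff0000', '#0000ff', n=5))
# ['#ff0000', '#bf003f', '#7f007f', '#3f00bf', '#0000ff']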
def prepare_mergetable_sql(self, precursor=False, isobaric=False,
probability=False, fdr=False, pep=False):
"""Dynamically build SQL query to generate entries for the multi-set
merged protein and peptide tables. E.g.
SELECT g.gene_acc, pc.channel_name, pc.amount_psms_name,
giq.quantvalue giq.amount_psms gfdr.fdr
FROM genes AS g
JOIN biosets AS bs
JOIN gene_tables AS gt ON gt.set_id=bs.set_id
JOIN genequant_channels AS pc ON pc.gene_table_id=gt.genetable_id
JOIN gene_iso_quanted AS giq ON giq.gene_id=g.gene_id
AND giq.channel_id=pc.channel_id
JOIN gene_fdr AS gfdr ON gfdr.gene_id=g.gene_id
AND gfdr.genetable_id=gt.genetable_id
ORDER BY g.gene
This is multi-set output because we join on biosets. The output is
then parsed to its respective set by the action code.
"""
featcol = self.colmap[self.table_map[self.datatype]['feattable']][1]
selectmap, count = self.update_selects({}, ['p_acc', 'set_name'], 0)
joins = []
if self.datatype == 'protein':
selects = ['pgm.{}'.format(featcol), 'bs.set_name']
firstselect = 'pgm'
joins.append(('proteins', 'g', ['pgm']))
else:
selects = ['g.{}'.format(featcol), 'bs.set_name']
firstselect = 'g'
if isobaric:
selects.extend(['pc.channel_name',
'pc.amount_psms_name', 'giq.quantvalue',
'giq.amount_psms'])
joins.extend([(self.table_map[self.datatype]['isochtable'], 'pc',
['gt']),
(self.table_map[self.datatype]['isoqtable'], 'giq',
['g', 'pc'], True),
])
fld = ['channel', 'isoq_psmsfield', 'isoq_val',
'isoq_psms']
selectmap, count = self.update_selects(selectmap, fld, count)
if precursor:
selects.extend(['preq.quant'])
joins.append((self.table_map[self.datatype]['prectable'], 'preq',
['g', 'gt'], True))
fld = ['preq_val']
selectmap, count = self.update_selects(selectmap, fld, count)
if probability:
selects.extend(['gprob.probability'])
joins.append((self.table_map[self.datatype]['probabilitytable'],
'gprob', ['g', 'gt'], True))
fld = ['prob_val']
selectmap, count = self.update_selects(selectmap, fld, count)
if fdr:
selects.extend(['gfdr.fdr'])
joins.append((self.table_map[self.datatype]['fdrtable'], 'gfdr',
['g', 'gt'], True))
fld = ['fdr_val']
selectmap, count = self.update_selects(selectmap, fld, count)
if pep:
selects.extend(['gpep.pep'])
joins.append((self.table_map[self.datatype]['peptable'], 'gpep',
['g', 'gt'], True))
fld = ['pep_val']
selectmap, count = self.update_selects(selectmap, fld, count)
sql = ('SELECT {} FROM {} AS {} JOIN biosets AS bs '
'JOIN {} AS gt ON gt.set_id=bs.set_id'.format(
', '.join(selects),
self.table_map[self.datatype]['feattable'],
firstselect,
self.table_map[self.datatype]['fntable']))
sql = self.get_sql_joins_mergetable(sql, joins, self.datatype)
sql = '{} ORDER BY g.{}'.format(sql, featcol)
return sql, selectmap | Dynamically build SQL query to generate entries for the multi-set
merged protein and peptide tables. E.g.
SELECT g.gene_acc, pc.channel_name, pc.amount_psms_name,
giq.quantvalue giq.amount_psms gfdr.fdr
FROM genes AS g
JOIN biosets AS bs
JOIN gene_tables AS gt ON gt.set_id=bs.set_id
JOIN genequant_channels AS pc ON pc.gene_table_id=gt.genetable_id
JOIN gene_iso_quanted AS giq ON giq.gene_id=g.gene_id
AND giq.channel_id=pc.channel_id
JOIN gene_fdr AS gfdr ON gfdr.gene_id=g.gene_id
AND gfdr.genetable_id=gt.genetable_id
ORDER BY g.gene
This is multi-set output because we join on biosets. The output is
then parsed to its respective set by the action code. | Below is the the instruction that describes the task:
### Input:
Dynamically build SQL query to generate entries for the multi-set
merged protein and peptide tables. E.g.
SELECT g.gene_acc, pc.channel_name, pc.amount_psms_name,
giq.quantvalue giq.amount_psms gfdr.fdr
FROM genes AS g
JOIN biosets AS bs
JOIN gene_tables AS gt ON gt.set_id=bs.set_id
JOIN genequant_channels AS pc ON pc.gene_table_id=gt.genetable_id
JOIN gene_iso_quanted AS giq ON giq.gene_id=g.gene_id
AND giq.channel_id=pc.channel_id
JOIN gene_fdr AS gfdr ON gfdr.gene_id=g.gene_id
AND gfdr.genetable_id=gt.genetable_id
ORDER BY g.gene
This is multi-set output because we join on biosets. The output is
then parsed to its respective set by the action code.
### Response:
def prepare_mergetable_sql(self, precursor=False, isobaric=False,
probability=False, fdr=False, pep=False):
"""Dynamically build SQL query to generate entries for the multi-set
merged protein and peptide tables. E.g.
SELECT g.gene_acc, pc.channel_name, pc.amount_psms_name,
giq.quantvalue giq.amount_psms gfdr.fdr
FROM genes AS g
JOIN biosets AS bs
JOIN gene_tables AS gt ON gt.set_id=bs.set_id
JOIN genequant_channels AS pc ON pc.gene_table_id=gt.genetable_id
JOIN gene_iso_quanted AS giq ON giq.gene_id=g.gene_id
AND giq.channel_id=pc.channel_id
JOIN gene_fdr AS gfdr ON gfdr.gene_id=g.gene_id
AND gfdr.genetable_id=gt.genetable_id
ORDER BY g.gene
This is multi-set output because we join on biosets. The output is
then parsed to its respective set by the action code.
"""
featcol = self.colmap[self.table_map[self.datatype]['feattable']][1]
selectmap, count = self.update_selects({}, ['p_acc', 'set_name'], 0)
joins = []
if self.datatype == 'protein':
selects = ['pgm.{}'.format(featcol), 'bs.set_name']
firstselect = 'pgm'
joins.append(('proteins', 'g', ['pgm']))
else:
selects = ['g.{}'.format(featcol), 'bs.set_name']
firstselect = 'g'
if isobaric:
selects.extend(['pc.channel_name',
'pc.amount_psms_name', 'giq.quantvalue',
'giq.amount_psms'])
joins.extend([(self.table_map[self.datatype]['isochtable'], 'pc',
['gt']),
(self.table_map[self.datatype]['isoqtable'], 'giq',
['g', 'pc'], True),
])
fld = ['channel', 'isoq_psmsfield', 'isoq_val',
'isoq_psms']
selectmap, count = self.update_selects(selectmap, fld, count)
if precursor:
selects.extend(['preq.quant'])
joins.append((self.table_map[self.datatype]['prectable'], 'preq',
['g', 'gt'], True))
fld = ['preq_val']
selectmap, count = self.update_selects(selectmap, fld, count)
if probability:
selects.extend(['gprob.probability'])
joins.append((self.table_map[self.datatype]['probabilitytable'],
'gprob', ['g', 'gt'], True))
fld = ['prob_val']
selectmap, count = self.update_selects(selectmap, fld, count)
if fdr:
selects.extend(['gfdr.fdr'])
joins.append((self.table_map[self.datatype]['fdrtable'], 'gfdr',
['g', 'gt'], True))
fld = ['fdr_val']
selectmap, count = self.update_selects(selectmap, fld, count)
if pep:
selects.extend(['gpep.pep'])
joins.append((self.table_map[self.datatype]['peptable'], 'gpep',
['g', 'gt'], True))
fld = ['pep_val']
selectmap, count = self.update_selects(selectmap, fld, count)
sql = ('SELECT {} FROM {} AS {} JOIN biosets AS bs '
'JOIN {} AS gt ON gt.set_id=bs.set_id'.format(
', '.join(selects),
self.table_map[self.datatype]['feattable'],
firstselect,
self.table_map[self.datatype]['fntable']))
sql = self.get_sql_joins_mergetable(sql, joins, self.datatype)
sql = '{} ORDER BY g.{}'.format(sql, featcol)
return sql, selectmap |
def allowed_methods(self, path_info=None):
"""Returns the valid methods that match for a given path.
.. versionadded:: 0.7
"""
try:
self.match(path_info, method="--")
except MethodNotAllowed as e:
return e.valid_methods
except HTTPException:
pass
return [] | Returns the valid methods that match for a given path.
.. versionadded:: 0.7 | Below is the the instruction that describes the task:
### Input:
Returns the valid methods that match for a given path.
.. versionadded:: 0.7
### Response:
def allowed_methods(self, path_info=None):
"""Returns the valid methods that match for a given path.
.. versionadded:: 0.7
"""
try:
self.match(path_info, method="--")
except MethodNotAllowed as e:
return e.valid_methods
except HTTPException:
pass
return [] |
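A usage sketch against werkzeug's routing Map, which is where this MapAdapter method lives; the URL rule below is made up for illustration.

from werkzeug.routing import Map, Rule

url_map = Map([Rule("/items", endpoint="items", methods=["GET", "POST"])])
adapter = url_map.bind("example.org")
# Typically reports GET/POST plus the HEAD/OPTIONS that werkzeug adds automatically.
print(adapter.allowed_methods("/items"))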
def info_community(self,teamid):
'''Get comunity info using a ID'''
headers = {"Content-type": "application/x-www-form-urlencoded","Accept": "text/plain",'Referer': 'http://'+self.domain+'/standings.phtml',"User-Agent": user_agent}
req = self.session.get('http://'+self.domain+'/teamInfo.phtml?tid='+teamid,headers=headers).content
soup = BeautifulSoup(req)
info = []
for i in soup.find('table',cellpadding=2).find_all('tr')[1:]:
info.append('%s\t%s\t%s\t%s\t%s'%(i.find('td').text,i.find('a')['href'].split('pid=')[1],i.a.text,i.find_all('td')[2].text,i.find_all('td')[3].text))
return info | Get comunity info using a ID | Below is the the instruction that describes the task:
### Input:
Get comunity info using a ID
### Response:
def info_community(self,teamid):
'''Get comunity info using a ID'''
headers = {"Content-type": "application/x-www-form-urlencoded","Accept": "text/plain",'Referer': 'http://'+self.domain+'/standings.phtml',"User-Agent": user_agent}
req = self.session.get('http://'+self.domain+'/teamInfo.phtml?tid='+teamid,headers=headers).content
soup = BeautifulSoup(req)
info = []
for i in soup.find('table',cellpadding=2).find_all('tr')[1:]:
info.append('%s\t%s\t%s\t%s\t%s'%(i.find('td').text,i.find('a')['href'].split('pid=')[1],i.a.text,i.find_all('td')[2].text,i.find_all('td')[3].text))
return info |
def import_words_from_file(self,
inputfile: str,
is_diceware: bool) -> None:
"""Import words for the wordlist from a given file.
The file can have a single column with words or be diceware-like
(two columns).
Keyword arguments:
inputfile -- A string with the path to the wordlist file to load, or
the value 'internal' to load the internal one.
is_diceware -- True if the file is diceware-like.
"""
if not Aux.isfile_notempty(inputfile):
raise FileNotFoundError('Input file does not exists, is not valid '
'or is empty: {}'.format(inputfile))
self._wordlist_entropy_bits = None
if is_diceware:
self._wordlist = self._read_words_from_diceware(inputfile)
else:
self._wordlist = self._read_words_from_wordfile(inputfile) | Import words for the wordlist from a given file.
The file can have a single column with words or be diceware-like
(two columns).
Keyword arguments:
inputfile -- A string with the path to the wordlist file to load, or
the value 'internal' to load the internal one.
is_diceware -- True if the file is diceware-like. | Below is the the instruction that describes the task:
### Input:
Import words for the wordlist from a given file.
The file can have a single column with words or be diceware-like
(two columns).
Keyword arguments:
inputfile -- A string with the path to the wordlist file to load, or
the value 'internal' to load the internal one.
is_diceware -- True if the file is diceware-like.
### Response:
def import_words_from_file(self,
inputfile: str,
is_diceware: bool) -> None:
"""Import words for the wordlist from a given file.
The file can have a single column with words or be diceware-like
(two columns).
Keyword arguments:
inputfile -- A string with the path to the wordlist file to load, or
the value 'internal' to load the internal one.
is_diceware -- True if the file is diceware-like.
"""
if not Aux.isfile_notempty(inputfile):
raise FileNotFoundError('Input file does not exists, is not valid '
'or is empty: {}'.format(inputfile))
self._wordlist_entropy_bits = None
if is_diceware:
self._wordlist = self._read_words_from_diceware(inputfile)
else:
self._wordlist = self._read_words_from_wordfile(inputfile) |
def warn(self, message, *args, **kwargs):
"""Send email and syslog by default ...
"""
self._log(logging.WARNING, message, *args, **kwargs) | Send email and syslog by default ... | Below is the the instruction that describes the task:
### Input:
Send email and syslog by default ...
### Response:
def warn(self, message, *args, **kwargs):
"""Send email and syslog by default ...
"""
self._log(logging.WARNING, message, *args, **kwargs) |
def add_pypiper_args(parser, groups=("pypiper", ), args=None,
required=None, all_args=False):
"""
Use this to add standardized pypiper arguments to your python pipeline.
There are two ways to use `add_pypiper_args`: by specifying argument groups,
or by specifying individual arguments. Specifying argument groups will add
multiple arguments to your parser; these convenient argument groupings
make it easy to add arguments to certain types of pipeline. For example,
to make a looper-compatible pipeline, use `groups = ["pypiper", "looper"]`.
:param argparse.ArgumentParser parser: ArgumentParser object from a pipeline
:param str | Iterable[str] groups: Adds arguments belong to specified group
of args. Options: pypiper, config, looper, resources, common, ngs, all.
:param str | Iterable[str] args: You may specify a list of specific arguments one by one.
:param Iterable[str] required: Arguments to be flagged as 'required' by argparse.
:param bool all_args: Whether to include all of pypiper's arguments defined here.
:return argparse.ArgumentParser: A new ArgumentParser object, with selected
pypiper arguments added
"""
args_to_add = _determine_args(
argument_groups=groups, arguments=args, use_all_args=all_args)
parser = _add_args(parser, args_to_add, required)
return parser | Use this to add standardized pypiper arguments to your python pipeline.
There are two ways to use `add_pypiper_args`: by specifying argument groups,
or by specifying individual arguments. Specifying argument groups will add
multiple arguments to your parser; these convenient argument groupings
make it easy to add arguments to certain types of pipeline. For example,
to make a looper-compatible pipeline, use `groups = ["pypiper", "looper"]`.
:param argparse.ArgumentParser parser: ArgumentParser object from a pipeline
:param str | Iterable[str] groups: Adds arguments belong to specified group
of args. Options: pypiper, config, looper, resources, common, ngs, all.
:param str | Iterable[str] args: You may specify a list of specific arguments one by one.
:param Iterable[str] required: Arguments to be flagged as 'required' by argparse.
:param bool all_args: Whether to include all of pypiper's arguments defined here.
:return argparse.ArgumentParser: A new ArgumentParser object, with selected
pypiper arguments added | Below is the the instruction that describes the task:
### Input:
Use this to add standardized pypiper arguments to your python pipeline.
There are two ways to use `add_pypiper_args`: by specifying argument groups,
or by specifying individual arguments. Specifying argument groups will add
multiple arguments to your parser; these convenient argument groupings
make it easy to add arguments to certain types of pipeline. For example,
to make a looper-compatible pipeline, use `groups = ["pypiper", "looper"]`.
:param argparse.ArgumentParser parser: ArgumentParser object from a pipeline
:param str | Iterable[str] groups: Adds arguments belong to specified group
of args. Options: pypiper, config, looper, resources, common, ngs, all.
:param str | Iterable[str] args: You may specify a list of specific arguments one by one.
:param Iterable[str] required: Arguments to be flagged as 'required' by argparse.
:param bool all_args: Whether to include all of pypiper's arguments defined here.
:return argparse.ArgumentParser: A new ArgumentParser object, with selected
pypiper arguments added
### Response:
def add_pypiper_args(parser, groups=("pypiper", ), args=None,
required=None, all_args=False):
"""
Use this to add standardized pypiper arguments to your python pipeline.
There are two ways to use `add_pypiper_args`: by specifying argument groups,
or by specifying individual arguments. Specifying argument groups will add
multiple arguments to your parser; these convenient argument groupings
make it easy to add arguments to certain types of pipeline. For example,
to make a looper-compatible pipeline, use `groups = ["pypiper", "looper"]`.
:param argparse.ArgumentParser parser: ArgumentParser object from a pipeline
:param str | Iterable[str] groups: Adds arguments belong to specified group
of args. Options: pypiper, config, looper, resources, common, ngs, all.
:param str | Iterable[str] args: You may specify a list of specific arguments one by one.
:param Iterable[str] required: Arguments to be flagged as 'required' by argparse.
:param bool all_args: Whether to include all of pypiper's arguments defined here.
:return argparse.ArgumentParser: A new ArgumentParser object, with selected
pypiper arguments added
"""
args_to_add = _determine_args(
argument_groups=groups, arguments=args, use_all_args=all_args)
parser = _add_args(parser, args_to_add, required)
return parser |
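A wiring sketch for a pipeline's own argument parser; the '--input' option and the sample value handed to parse_args are illustrative only.

import argparse
import pypiper

parser = argparse.ArgumentParser(description="example pipeline")
parser.add_argument("--input", required=True)
# Layer the standardized pypiper/looper argument groups on top of the pipeline-specific ones.
parser = pypiper.add_pypiper_args(parser, groups=["pypiper", "looper"])
args = parser.parse_args(["--input", "sample.fastq"])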
def profile_args(_args):
"""Return args for v1, v2, or v3 structure.
Args:
_args (dict): The args section from the profile.
Returns:
dict: A collapsed version of the args dict.
"""
# TODO: clean this up in a way that works for both py2/3
if (
_args.get('app', {}).get('optional') is not None
or _args.get('app', {}).get('required') is not None
):
# detect v3 schema
app_args_optional = _args.get('app', {}).get('optional', {})
app_args_required = _args.get('app', {}).get('required', {})
default_args = _args.get('default', {})
_args = {}
_args.update(app_args_optional)
_args.update(app_args_required)
_args.update(default_args)
elif _args.get('app') is not None and _args.get('default') is not None:
# detect v2 schema
app_args = _args.get('app', {})
default_args = _args.get('default', {})
_args = {}
_args.update(app_args)
_args.update(default_args)
return _args | Return args for v1, v2, or v3 structure.
Args:
_args (dict): The args section from the profile.
Returns:
dict: A collapsed version of the args dict. | Below is the the instruction that describes the task:
### Input:
Return args for v1, v2, or v3 structure.
Args:
_args (dict): The args section from the profile.
Returns:
dict: A collapsed version of the args dict.
### Response:
def profile_args(_args):
"""Return args for v1, v2, or v3 structure.
Args:
_args (dict): The args section from the profile.
Returns:
dict: A collapsed version of the args dict.
"""
# TODO: clean this up in a way that works for both py2/3
if (
_args.get('app', {}).get('optional') is not None
or _args.get('app', {}).get('required') is not None
):
# detect v3 schema
app_args_optional = _args.get('app', {}).get('optional', {})
app_args_required = _args.get('app', {}).get('required', {})
default_args = _args.get('default', {})
_args = {}
_args.update(app_args_optional)
_args.update(app_args_required)
_args.update(default_args)
elif _args.get('app') is not None and _args.get('default') is not None:
# detect v2 schema
app_args = _args.get('app', {})
default_args = _args.get('default', {})
_args = {}
_args.update(app_args)
_args.update(default_args)
return _args |
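A quick illustration of the v3-schema branch collapsing a nested profile into one flat dict, assuming profile_args is available as defined above; the key names are hypothetical.

v3_args = {
    'app': {
        'required': {'tc_token': '{{ token }}'},   # hypothetical required arg
        'optional': {'confidence': 75},            # hypothetical optional arg
    },
    'default': {'tc_log_level': 'debug'},
}
print(profile_args(v3_args))
# {'confidence': 75, 'tc_token': '{{ token }}', 'tc_log_level': 'debug'}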
def to_projection(self):
"""
Promote this column expression to a table projection
"""
roots = self._root_tables()
if len(roots) > 1:
raise com.RelationError(
'Cannot convert array expression '
'involving multiple base table references '
'to a projection'
)
table = TableExpr(roots[0])
return table.projection([self]) | Promote this column expression to a table projection | Below is the the instruction that describes the task:
### Input:
Promote this column expression to a table projection
### Response:
def to_projection(self):
"""
Promote this column expression to a table projection
"""
roots = self._root_tables()
if len(roots) > 1:
raise com.RelationError(
'Cannot convert array expression '
'involving multiple base table references '
'to a projection'
)
table = TableExpr(roots[0])
return table.projection([self]) |
def set_mag_offsets_encode(self, target_system, target_component, mag_ofs_x, mag_ofs_y, mag_ofs_z):
'''
Deprecated. Use MAV_CMD_PREFLIGHT_SET_SENSOR_OFFSETS instead. Set the
magnetometer offsets
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
mag_ofs_x : magnetometer X offset (int16_t)
mag_ofs_y : magnetometer Y offset (int16_t)
mag_ofs_z : magnetometer Z offset (int16_t)
'''
return MAVLink_set_mag_offsets_message(target_system, target_component, mag_ofs_x, mag_ofs_y, mag_ofs_z) | Deprecated. Use MAV_CMD_PREFLIGHT_SET_SENSOR_OFFSETS instead. Set the
magnetometer offsets
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
mag_ofs_x : magnetometer X offset (int16_t)
mag_ofs_y : magnetometer Y offset (int16_t)
mag_ofs_z : magnetometer Z offset (int16_t) | Below is the the instruction that describes the task:
### Input:
Deprecated. Use MAV_CMD_PREFLIGHT_SET_SENSOR_OFFSETS instead. Set the
magnetometer offsets
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
mag_ofs_x : magnetometer X offset (int16_t)
mag_ofs_y : magnetometer Y offset (int16_t)
mag_ofs_z : magnetometer Z offset (int16_t)
### Response:
def set_mag_offsets_encode(self, target_system, target_component, mag_ofs_x, mag_ofs_y, mag_ofs_z):
'''
Deprecated. Use MAV_CMD_PREFLIGHT_SET_SENSOR_OFFSETS instead. Set the
magnetometer offsets
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
mag_ofs_x : magnetometer X offset (int16_t)
mag_ofs_y : magnetometer Y offset (int16_t)
mag_ofs_z : magnetometer Z offset (int16_t)
'''
return MAVLink_set_mag_offsets_message(target_system, target_component, mag_ofs_x, mag_ofs_y, mag_ofs_z) |
def pop_min(self):
"""
Remove the minimum value and return it.
"""
if self.root is NULL:
raise KeyError("pop from an empty blackjack")
self.root, value = self.root.delete_min()
self._len -= 1
return value | Remove the minimum value and return it. | Below is the the instruction that describes the task:
### Input:
Remove the minimum value and return it.
### Response:
def pop_min(self):
"""
Remove the minimum value and return it.
"""
if self.root is NULL:
raise KeyError("pop from an empty blackjack")
self.root, value = self.root.delete_min()
self._len -= 1
return value |
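The blackjack tree's internals (NULL, the node-level delete_min) are not shown here, so the same pop-the-minimum contract is illustrated with the standard library's heapq instead; this is a stand-in, not the structure above.

import heapq

heap = [5, 1, 4, 2]
heapq.heapify(heap)
print(heapq.heappop(heap))   # 1 -- smallest value removed and returned
print(heapq.heappop(heap))   # 2

# Popping an empty container raises, mirroring the KeyError guard in pop_min above.
try:
    heapq.heappop([])
except IndexError:
    print("pop from an empty heap")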