def imu_changed(self, val):
"""Handle clicks on the IMU index spinner."""
self.current_imuid = '{}_IMU{}'.format(self.sk8.get_device_name(), val)
    self.update_data_display(self.get_current_data())

def run_azure(target, jobs, n=1, nproc=None, path='.', delete=True, config=None, **kwargs):
"""
Evaluate the given function with each set of arguments, and return a list of results.
    This function runs in parallel using Microsoft Azure Batch.
    This function is a work in progress.
The argument `nproc` doesn't work yet.
See `ecell4.extra.azure_batch.run_azure` for details.
See Also
--------
ecell4.extra.ensemble.run_serial
ecell4.extra.ensemble.run_sge
ecell4.extra.ensemble.run_slurm
ecell4.extra.ensemble.run_multiprocessing
ecell4.extra.ensemble.run_azure
ecell4.extra.azure_batch.run_azure
"""
import ecell4.extra.azure_batch as azure_batch
    return azure_batch.run_azure(target, jobs, n, path, delete, config)

def _complete_multipart_upload(self, bucket_name, object_name,
upload_id, uploaded_parts):
"""
Complete an active multipart upload request.
:param bucket_name: Bucket name of the multipart request.
:param object_name: Object name of the multipart request.
:param upload_id: Upload id of the active multipart request.
:param uploaded_parts: Key, Value dictionary of uploaded parts.
"""
is_valid_bucket_name(bucket_name)
is_non_empty_string(object_name)
is_non_empty_string(upload_id)
# Order uploaded parts as required by S3 specification
ordered_parts = []
for part in sorted(uploaded_parts.keys()):
ordered_parts.append(uploaded_parts[part])
data = xml_marshal_complete_multipart_upload(ordered_parts)
sha256_hex = get_sha256_hexdigest(data)
md5_base64 = get_md5_base64digest(data)
headers = {
'Content-Length': len(data),
'Content-Type': 'application/xml',
'Content-Md5': md5_base64,
}
response = self._url_open('POST', bucket_name=bucket_name,
object_name=object_name,
query={'uploadId': upload_id},
headers=headers, body=data,
content_sha256=sha256_hex)
    return parse_multipart_upload_result(response.data)

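# S3 requires CompleteMultipartUpload parts in ascending part-number order.
# Note that sorted() above sorts the dict keys as-is, so the part numbers are
# expected to be ints; string keys would sort lexicographically ('10' < '2').
# A quick self-contained check of the ordering step (hypothetical etags):
uploaded_parts = {3: 'etag-3', 1: 'etag-1', 10: 'etag-10', 2: 'etag-2'}
ordered = [uploaded_parts[part] for part in sorted(uploaded_parts)]
print(ordered)  # ['etag-1', 'etag-2', 'etag-3', 'etag-10']
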
def short_title(self):
"""
Generates an abbreviated title by subtracting the parent's title from this instance's title.
"""
if self.title and self.parent is not None and hasattr(self.parent, 'title') and self.parent.title:
if self.title.startswith(self.parent.title):
short = self.title[len(self.parent.title):].strip()
match = _punctuation_re.match(short)
if match:
short = short[match.end():].strip()
if short:
return short
    return self.title

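# A minimal standalone sketch of the same title-subtraction logic, assuming
# _punctuation_re matches leading separators such as ':' or '-'; the names
# here are illustrative, not taken from the original class.
import re

_punctuation_re = re.compile(r'^[\s\-:,.]+')

def short_title_demo(title, parent_title):
    """Strip the parent's title (plus any separator) from a child title."""
    if title.startswith(parent_title):
        short = title[len(parent_title):].strip()
        match = _punctuation_re.match(short)
        if match:
            short = short[match.end():].strip()
        if short:
            return short
    return title

print(short_title_demo('Manual: Installation', 'Manual'))  # 'Installation'
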
def dump_report_content(self, request, result):
"""
    Dumps the content to a string, suitable for writing to a file.
:param result: The result being processed.
:return: string
"""
output = StringIO()
writer = csv.writer(output, **self.csv_kwargs)
writer.writerows([result.headers] + result.values)
    return output.getvalue()

def get_authorization_user(self, **kwargs):
"""Gets the user the authorization object is for."""
if self.authorization_user is not None:
return self.authorization_user
self.authorization_user = self.request.user
    return self.request.user

def lint(to_lint):
"""
Run all linters against a list of files.
:param to_lint: a list of files to lint.
"""
exit_code = 0
for linter, options in (('pyflakes', []), ('pep8', [])):
try:
output = local[linter](*(options + to_lint))
except commands.ProcessExecutionError as e:
output = e.stdout
if output:
exit_code = 1
print "{0} Errors:".format(linter)
print output
output = hacked_pep257(to_lint)
if output:
exit_code = 1
print "Docstring Errors:".format(linter.upper())
print output
    sys.exit(exit_code)

def _get_exception_class_from_status_code(status_code):
"""
Utility function that accepts a status code, and spits out a reference
to the correct exception class to raise.
:param str status_code: The status code to return an exception class for.
:rtype: PetfinderAPIError or None
:returns: The appropriate PetfinderAPIError subclass. If the status code
is not an error, return ``None``.
"""
if status_code == '100':
return None
exc_class = STATUS_CODE_MAPPING.get(status_code)
if not exc_class:
# No status code match, return the "I don't know wtf this is"
# exception class.
return STATUS_CODE_MAPPING['UNKNOWN']
else:
# Match found, yay.
        return exc_class

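# A minimal sketch of what STATUS_CODE_MAPPING could look like; the subclass
# names and the '200' key below are assumptions for illustration only, not
# Petfinder's actual error codes.
class PetfinderAPIError(Exception):
    """Base class for Petfinder API errors."""

class InvalidRequestError(PetfinderAPIError):
    """Assumed subclass for a malformed request."""

class UnknownStatusError(PetfinderAPIError):
    """Assumed catch-all for unrecognized status codes."""

STATUS_CODE_MAPPING = {
    '200': InvalidRequestError,
    'UNKNOWN': UnknownStatusError,
}

print(_get_exception_class_from_status_code('100'))  # None (success)
print(_get_exception_class_from_status_code('999'))  # UnknownStatusError
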
def exchange(self, data):
"""
Perform SPI transaction.
The first received byte is either ACK or NACK.
:TODO: enforce rule that up to 63 bytes of data can be sent.
    :TODO: enforce rule that there are no gaps in data bytes (what defines a gap?)
:param data: List of bytes
:returns: List of bytes
:rtype: List of bytes
"""
self._usbiss.write_data([self._usbiss.SPI_CMD] + data)
response = self._usbiss.read_data(1 + len(data))
if len(response) != 0:
response = self._usbiss.decode(response)
status = response.pop(0)
if status == 0:
raise USBISSError('SPI Transmission Error')
return response
else:
        raise USBISSError('SPI Transmission Error: No bytes received!')

def eth_getCode(self, address):
"""Get account code.
:param address:
:return:
"""
account = self.reader._get_account(address)
    return _encode_hex(account.code)

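# _encode_hex is assumed to hex-encode raw bytes with the '0x' prefix used by
# JSON-RPC responses; a minimal sketch of such a helper:
def _encode_hex(value):
    return '0x' + (value.hex() if isinstance(value, bytes) else format(value, 'x'))

print(_encode_hex(b'\x60\x60'))  # '0x6060'
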
def disassemble(self, offset, size):
"""
Disassembles a given offset in the DEX file
:param offset: offset to disassemble in the file (from the beginning of the file)
:type offset: int
:param size:
:type size:
"""
for i in DCode(
self.CM, offset, size,
self.get_buff()[offset:offset + size]).get_instructions():
        yield i

def get_default_key_store(*args, config, **kwargs):
""" This method returns the default **key** store
that uses an SQLite database internally.
    :param str appname: The appname that is used internally to distinguish
different SQLite files
"""
kwargs["appname"] = kwargs.get("appname", "graphene")
    return SqliteEncryptedKeyStore(config=config, **kwargs)

def download_and_extract_dataset(self, destination_directory: str):
"""
Downloads and extracts the MUSCIMA++ dataset along with the images from the CVC-MUSCIMA dataset
that were manually annotated (140 out of 1000 images).
"""
if not os.path.exists(self.get_dataset_filename()):
print("Downloading MUSCIMA++ Dataset...")
self.download_file(self.get_dataset_download_url(), self.get_dataset_filename())
if not os.path.exists(self.get_imageset_filename()):
print("Downloading MUSCIMA++ Images...")
self.download_file(self.get_images_download_url(), self.get_imageset_filename())
print("Extracting MUSCIMA++ Dataset...")
self.extract_dataset(os.path.abspath(destination_directory))
absolute_path_to_temp_folder = os.path.abspath('MuscimaPpImages')
self.extract_dataset(absolute_path_to_temp_folder, self.get_imageset_filename())
DatasetDownloader.copytree(os.path.join(absolute_path_to_temp_folder, "fulls"),
os.path.join(os.path.abspath(destination_directory), self.dataset_version(), "data",
"images"))
    self.clean_up_temp_directory(absolute_path_to_temp_folder)

def register_form_factory(Form, app):
"""Return extended registration form."""
if app.config.get('RECAPTCHA_PUBLIC_KEY') and \
app.config.get('RECAPTCHA_PRIVATE_KEY'):
class RegisterForm(Form):
recaptcha = FormField(RegistrationFormRecaptcha, separator='.')
return RegisterForm
    return Form

def save(self):
    '''Saves our config object to file.'''
if self.app.cfg_mode == 'json':
with open(self.app.cfg_file, 'w') as opened_file:
json.dump(self.app.cfg, opened_file)
else:
        with open(self.app.cfg_file, 'w') as opened_file:
            yaml.dump(self.app.cfg, opened_file)

def get_next_entry(self, method, info, request):
"""Cycle through available responses, but only once.
Any subsequent requests will receive the last response"""
if method not in self.current_entries:
self.current_entries[method] = 0
# restrict selection to entries that match the requested method
entries_for_method = [e for e in self.entries if e.method == method]
if self.current_entries[method] >= len(entries_for_method):
self.current_entries[method] = -1
if not self.entries or not entries_for_method:
raise ValueError('I have no entries for method %s: %s'
% (method, self))
entry = entries_for_method[self.current_entries[method]]
if self.current_entries[method] != -1:
self.current_entries[method] += 1
# Attach more info to the entry
# So the callback can be more clever about what to do
# This does also fix the case where the callback
# would be handed a compiled regex as uri instead of the
# real uri
entry.info = info
entry.request = request
    return entry

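# A standalone sketch of the cycle-then-stick index logic above: successive
# calls walk the entries once, then keep returning the last one.
def make_cycler(entries):
    state = {'i': 0}
    def next_entry():
        if state['i'] >= len(entries):
            state['i'] = -1
        entry = entries[state['i']]
        if state['i'] != -1:
            state['i'] += 1
        return entry
    return next_entry

nxt = make_cycler(['first', 'second'])
print([nxt() for _ in range(4)])  # ['first', 'second', 'second', 'second']
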
def name_for_callable(func):
"""Returns a module name for a callable or `None` if no name can be found."""
if isinstance(func, functools.partial):
return name_for_callable(func.func)
try:
name = func.__name__
except AttributeError:
return None
if name == "<lambda>":
return None
else:
        return to_snake_case(name)

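# Self-contained demo of the naming rules above; to_snake_case here is a
# minimal assumed implementation, not necessarily the library's own.
import functools
import re

def to_snake_case(name):
    return re.sub(r'(?<!^)(?=[A-Z])', '_', name).lower()

def scaled_add(x, y, scale=1.0):
    return scale * (x + y)

print(name_for_callable(scaled_add))                                # 'scaled_add'
print(name_for_callable(functools.partial(scaled_add, scale=2.0)))  # 'scaled_add'
print(name_for_callable(lambda x: x))                               # None
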
def stop(self):
"""
Stop the interface
:rtype: None
"""
should_sleep = self._is_running
super(Sensor, self).stop()
if should_sleep:
# Make sure everything has enough time to exit
time.sleep(max(self._select_timeout, self._retransmit_timeout) + 1)
if self._listen_socket is not None:
        self._shutdown_listen_socket()

def __all_parameters_processed(self):
"""
Finish the argument processing.
Should be called after all the web service operation's parameters have
been successfully processed and, afterwards, no further parameter
processing is allowed.
Returns a 2-tuple containing the number of required & allowed
arguments.
See the _ArgParser class description for more detailed information.
"""
assert self.active()
sentinel_frame = self.__stack[0]
self.__pop_frames_above(sentinel_frame)
assert len(self.__stack) == 1
self.__pop_top_frame()
assert not self.active()
args_required = sentinel_frame.args_required()
args_allowed = sentinel_frame.args_allowed()
self.__check_for_extra_arguments(args_required, args_allowed)
    return args_required, args_allowed

def unregister_path(self, path):
"""
Unregisters given path.
:param path: Path name.
:type path: unicode
:return: Method success.
:rtype: bool
"""
    if path not in self:
        raise umbra.exceptions.PathExistsError("{0} | '{1}' path isn't registered!".format(
            self.__class__.__name__, path))
    del self.__paths[path]
    return True

def _max_gain_split(self, examples):
"""
Returns an OnlineInformationGain of the attribute with
max gain based on `examples`.
"""
gains = self._new_set_of_gain_counters()
for example in examples:
for gain in gains:
gain.add(example)
winner = max(gains, key=lambda gain: gain.get_gain())
if not winner.get_target_class_counts():
raise ValueError("Dataset is empty")
    return winner

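# Minimal sketch of a gain counter satisfying the add()/get_gain()/
# get_target_class_counts() contract used above; the example.target field and
# per-attribute access below are assumptions for illustration only.
import math
from collections import Counter, defaultdict

class OnlineInformationGainSketch:
    def __init__(self, attribute):
        self.attribute = attribute
        self.target_counts = Counter()
        self.value_counts = defaultdict(Counter)

    def add(self, example):
        value = getattr(example, self.attribute)
        self.target_counts[example.target] += 1
        self.value_counts[value][example.target] += 1

    def get_target_class_counts(self):
        return self.target_counts

    def get_gain(self):
        total = sum(self.target_counts.values())
        def entropy(counts):
            n = sum(counts.values())
            return -sum(c / n * math.log2(c / n) for c in counts.values())
        conditional = sum(sum(c.values()) / total * entropy(c)
                          for c in self.value_counts.values())
        return entropy(self.target_counts) - conditional
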
def find_node(self, point, cur_node=None):
"""!
@brief Find node with coordinates that are defined by specified point.
@details If node with specified parameters does not exist then None will be returned,
otherwise required node will be returned.
@param[in] point (list): Coordinates of the point whose node should be found.
@param[in] cur_node (node): Node from which search should be started.
    @return (node) Node if it satisfies the input parameters, otherwise None.
"""
rule_search = lambda node, point=point: self.__point_comparator(node.data, point)
    return self.__find_node_by_rule(point, rule_search, cur_node)

def zip(self, destination: typing.Union[str, Path] = None, encode: bool = True) -> str:
"""
Write mission, dictionary etc. to a MIZ file
Args:
        destination: target MIZ file (if None, defaults to source MIZ + "_EMIZ")
Returns: destination file
"""
if encode:
self._encode()
if destination is None:
destination_path = self.miz_path.parent.joinpath(f'{self.miz_path.stem}_EMIZ.miz')
else:
destination_path = elib.path.ensure_file(destination, must_exist=False)
LOGGER.debug('zipping mission to: %s', destination_path)
destination_path.write_bytes(dummy_miz)
with ZipFile(str(destination_path), mode='w', compression=8) as zip_file:
for root, _, items in os.walk(self.temp_dir.absolute()):
for item in items:
item_abs_path = Path(root, item).absolute()
item_rel_path = Path(item_abs_path).relative_to(self.temp_dir)
zip_file.write(item_abs_path, arcname=item_rel_path)
    return str(destination_path)

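# Note: compression=8 above is the raw value of zipfile.ZIP_DEFLATED; the
# named constant reads more clearly.
from zipfile import ZIP_DEFLATED
assert ZIP_DEFLATED == 8
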
def temperature(self):
"""Get the temperature from the sensor.
    :returns: The temperature in degrees Celsius as a float
:example:
>>> sensor = BMP180(gw)
>>> sensor.load_calibration()
>>> sensor.temperature()
21.4
"""
ut = self.get_raw_temp()
x1 = ((ut - self.cal['AC6']) * self.cal['AC5']) >> 15
x2 = (self.cal['MC'] << 11) // (x1 + self.cal['MD'])
b5 = x1 + x2
    return ((b5 + 8) >> 4) / 10

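# Worked example of the integer compensation above, using the calibration
# values and raw reading from the Bosch BMP180 datasheet's sample calculation:
cal = {'AC5': 32757, 'AC6': 23153, 'MC': -8711, 'MD': 2868}
ut = 27898  # raw uncompensated temperature
x1 = ((ut - cal['AC6']) * cal['AC5']) >> 15
x2 = (cal['MC'] << 11) // (x1 + cal['MD'])
b5 = x1 + x2
print(((b5 + 8) >> 4) / 10)  # 15.0 degrees Celsius
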
def create_shipping_address(self, shipping_address):
"""Creates a shipping address on an existing account. If you are
creating an account, you can embed the shipping addresses with the
request"""
url = urljoin(self._url, '/shipping_addresses')
    return shipping_address.post(url)

def t0_supconj_to_perpass(t0_supconj, period, ecc, per0):
"""
TODO: add documentation
"""
    return ConstraintParameter(t0_supconj._bundle, "t0_supconj_to_perpass({}, {}, {}, {})".format(_get_expr(t0_supconj), _get_expr(period), _get_expr(ecc), _get_expr(per0)))

def update(self, *args, **kwargs):
"""Modifies the parameters and adds metadata for update results.
    Currently it does not support the `PUT` method, which works as replacing
    the resource. This is somewhat questionable for a relational DB.
"""
if request.method == 'PUT':
logging.warning("Called not implemented resource method PUT")
resource = super(JsonServerResource, self).update(*args, **kwargs)
if resource:
return resource
else:
        return NOT_FOUND

async def deploy(self, charm, series, application, options, constraints,
storage, endpoint_bindings, *args):
"""
:param charm string:
Charm holds the URL of the charm to be used to deploy this
application.
:param series string:
Series holds the series of the application to be deployed
if the charm default is not sufficient.
:param application string:
Application holds the application name.
:param options map[string]interface{}:
Options holds application options.
:param constraints string:
Constraints holds the optional application constraints.
:param storage map[string]string:
Storage holds the optional storage constraints.
:param endpoint_bindings map[string]string:
EndpointBindings holds the optional endpoint bindings
:param devices map[string]string:
Devices holds the optional devices constraints.
(Only given on Juju 2.5+)
:param resources map[string]int:
Resources identifies the revision to use for each resource
of the application's charm.
:param num_units int:
NumUnits holds the number of units required. For IAAS models, this
will be 0 and separate AddUnitChanges will be used. For Kubernetes
models, this will be used to scale the application.
(Only given on Juju 2.5+)
"""
# resolve indirect references
charm = self.resolve(charm)
if len(args) == 1:
# Juju 2.4 and below only sends the resources
resources = args[0]
devices, num_units = None, None
else:
# Juju 2.5+ sends devices before resources, as well as num_units
# There might be placement but we need to ignore that.
devices, resources, num_units = args[:3]
if not charm.startswith('local:'):
resources = await self.model._add_store_resources(
application, charm, overrides=resources)
await self.model._deploy(
charm_url=charm,
application=application,
series=series,
config=options,
constraints=constraints,
endpoint_bindings=endpoint_bindings,
resources=resources,
storage=storage,
devices=devices,
num_units=num_units,
)
    return application

def nn_poll(fds, timeout=-1):
"""
nn_pollfds
:param fds: dict (file descriptor => pollmode)
:param timeout: timeout in milliseconds
:return:
"""
polls = []
    for fd, event in fds.items():
        s = PollFds()
        s.fd = fd
        s.events = event
        s.revents = 0
        polls.append(s)
poll_array = (PollFds*len(fds))(*polls)
res = _nn_poll(poll_array, len(fds), int(timeout))
if res <= 0:
return res, {}
    return res, {item.fd: item.revents for item in poll_array}

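# Hedged usage sketch: NN_POLLIN (1) and NN_POLLOUT (2) are nanomsg's poll
# flags; sock_fd stands for a descriptor returned by an nn_socket() call.
NN_POLLIN, NN_POLLOUT = 1, 2

def wait_readable(sock_fd, timeout_ms=1000):
    res, revents = nn_poll({sock_fd: NN_POLLIN}, timeout=timeout_ms)
    return res > 0 and bool(revents[sock_fd] & NN_POLLIN)
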
def execute(self, jvm_options=None, args=None, executor=None,
workunit_factory=None, workunit_name=None, workunit_labels=None):
"""Executes the ivy commandline client with the given args.
Raises Ivy.Error if the command fails for any reason.
:param executor: Java executor to run ivy with.
"""
# NB(gmalmquist): It should be OK that we can't declare a subsystem_dependency in this file
# (because it's just a plain old object), because Ivy is only constructed by Bootstrapper, which
# makes an explicit call to IvySubsystem.global_instance() in its constructor, which in turn has
# a declared dependency on DistributionLocator.
executor = executor or SubprocessExecutor(DistributionLocator.cached())
runner = self.runner(jvm_options=jvm_options, args=args, executor=executor)
try:
with self.resolution_lock:
result = util.execute_runner(runner, workunit_factory, workunit_name, workunit_labels)
if result != 0:
raise self.Error('Ivy command failed with exit code {}{}'.format(
result, ': ' + ' '.join(args) if args else ''))
except executor.Error as e:
        raise self.Error('Problem executing ivy: {}'.format(e))

def gcs_write(self, log, remote_log_location, append=True):
"""
Writes the log to the remote_log_location. Fails silently if no hook
was created.
:param log: the log to write to the remote_log_location
:type log: str
:param remote_log_location: the log's location in remote storage
:type remote_log_location: str (path)
:param append: if False, any existing log file is overwritten. If True,
the new log is appended to any existing logs.
:type append: bool
"""
if append:
try:
old_log = self.gcs_read(remote_log_location)
log = '\n'.join([old_log, log]) if old_log else log
except Exception as e:
if not hasattr(e, 'resp') or e.resp.get('status') != '404':
log = '*** Previous log discarded: {}\n\n'.format(str(e)) + log
try:
bkt, blob = self.parse_gcs_url(remote_log_location)
from tempfile import NamedTemporaryFile
with NamedTemporaryFile(mode='w+') as tmpfile:
tmpfile.write(log)
# Force the file to be flushed, since we're doing the
# upload from within the file context (it hasn't been
# closed).
tmpfile.flush()
self.hook.upload(bkt, blob, tmpfile.name)
except Exception as e:
        self.log.error('Could not write logs to %s: %s', remote_log_location, e)

def _stop(self):
""" Stops recording. Returns all recorded data and their timestamps. Destroys recorder process."""
self._pause()
self._cmds_q.put(("stop",))
try:
self._recorder.terminate()
except Exception:
pass
    self._recording = False

def start_service(addr, n):
""" Start a service """
s = Service(addr)
s.register('add', lambda x, y: x + y)
started = time.time()
for _ in range(n):
s.process()
duration = time.time() - started
time.sleep(0.1)
print('Service stats:')
util.print_stats(n, duration)
    return

def pull(self, repository, tag=None, stream=True, **kwargs):
"""
Identical to :meth:`dockermap.client.base.DockerClientWrapper.pull` with two enhancements:
* additional logging;
* the ``insecure_registry`` flag can be passed through ``kwargs``, or set as default using
``env.docker_registry_insecure``.
"""
c_insecure = kwargs.pop('insecure_registry', env.get('docker_registry_insecure'))
set_raise_on_error(kwargs)
try:
return super(DockerFabricClient, self).pull(repository, tag=tag, stream=stream,
insecure_registry=c_insecure, **kwargs)
except DockerStatusError as e:
        error(e.message)

def nonzero_pixels(self):
""" Return an array of the nonzero pixels.
Returns
-------
:obj:`numpy.ndarray`
Nx2 array of the nonzero pixels
"""
nonzero_px = np.where(np.sum(self.raw_data, axis=2) > 0)
nonzero_px = np.c_[nonzero_px[0], nonzero_px[1]]
    return nonzero_px

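# Quick self-contained check of the same np.where / np.c_ pattern on a tiny
# 2x2 RGB array (raw_data is assumed to be height x width x channels):
import numpy as np

raw = np.zeros((2, 2, 3))
raw[0, 1] = [10, 0, 0]
raw[1, 0] = [0, 5, 5]
rows, cols = np.where(np.sum(raw, axis=2) > 0)
print(np.c_[rows, cols])  # [[0 1]
                          #  [1 0]]
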
def send_one_ping(self, current_socket):
"""
Send one ICMP ECHO_REQUEST.
"""
# Header is type (8), code (8), checksum (16), id (16), sequence (16)
checksum = 0
# Make a dummy header with a 0 checksum.
header = struct.pack(
"!BBHHH", ICMP_ECHO, 0, checksum, self.own_id, self.seq_number
)
padBytes = []
startVal = 0x42
for i in range(startVal, startVal + (self.packet_size)):
padBytes += [(i & 0xff)] # Keep chars in the 0-255 range
data = bytes(padBytes)
# Calculate the checksum on the data and the dummy header.
checksum = calculate_checksum(header + data) # Checksum is in network order
# Now that we have the right checksum, we put that in. It's just easier
# to make up a new header than to stuff it into the dummy.
header = struct.pack(
"!BBHHH", ICMP_ECHO, 0, checksum, self.own_id, self.seq_number
)
packet = header + data
send_time = default_timer()
try:
current_socket.sendto(packet, (self.destination, 1)) # Port number is irrelevant for ICMP
except socket.error as e:
print("General failure (%s)" % (e.args[1]))
current_socket.close()
return
    return send_time

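# calculate_checksum above is assumed to be the standard RFC 1071 Internet
# checksum; a minimal sketch (byte-order conventions vary between ping
# implementations):
def calculate_checksum(source_bytes):
    """One's-complement sum of 16-bit words, carries folded back in."""
    if len(source_bytes) % 2:
        source_bytes += b'\x00'  # pad odd-length input
    total = 0
    for i in range(0, len(source_bytes), 2):
        total += (source_bytes[i] << 8) + source_bytes[i + 1]
        total = (total & 0xffff) + (total >> 16)  # fold carry bits
    return ~total & 0xffff
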
def get_or_create_media(self, api_media):
"""
Find or create a Media object given API data.
:param api_media: the API data for the Media
:return: a tuple of an Media instance and a boolean indicating whether the Media was created or not
"""
return Media.objects.get_or_create(site_id=self.site_id,
wp_id=api_media["ID"],
defaults=self.api_object_data("media", api_media)) | Find or create a Media object given API data.
:param api_media: the API data for the Media
:return: a tuple of a Media instance and a boolean indicating whether the Media was created or not | Below is the instruction that describes the task:
### Input:
Find or create a Media object given API data.
:param api_media: the API data for the Media
:return: a tuple of a Media instance and a boolean indicating whether the Media was created or not
### Response:
def get_or_create_media(self, api_media):
"""
Find or create a Media object given API data.
:param api_media: the API data for the Media
:return: a tuple of a Media instance and a boolean indicating whether the Media was created or not
"""
return Media.objects.get_or_create(site_id=self.site_id,
wp_id=api_media["ID"],
defaults=self.api_object_data("media", api_media)) |
def _set_show_mpls_rsvp(self, v, load=False):
"""
Setter method for show_mpls_rsvp, mapped from YANG variable /brocade_mpls_rpc/show_mpls_rsvp (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_show_mpls_rsvp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_show_mpls_rsvp() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=show_mpls_rsvp.show_mpls_rsvp, is_leaf=True, yang_name="show-mpls-rsvp", rest_name="show-mpls-rsvp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'full', u'actionpoint': u'showMplsRsvp'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='rpc', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """show_mpls_rsvp must be of a type compatible with rpc""",
'defined-type': "rpc",
'generated-type': """YANGDynClass(base=show_mpls_rsvp.show_mpls_rsvp, is_leaf=True, yang_name="show-mpls-rsvp", rest_name="show-mpls-rsvp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'full', u'actionpoint': u'showMplsRsvp'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='rpc', is_config=True)""",
})
self.__show_mpls_rsvp = t
if hasattr(self, '_set'):
self._set() | Setter method for show_mpls_rsvp, mapped from YANG variable /brocade_mpls_rpc/show_mpls_rsvp (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_show_mpls_rsvp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_show_mpls_rsvp() directly. | Below is the instruction that describes the task:
### Input:
Setter method for show_mpls_rsvp, mapped from YANG variable /brocade_mpls_rpc/show_mpls_rsvp (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_show_mpls_rsvp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_show_mpls_rsvp() directly.
### Response:
def _set_show_mpls_rsvp(self, v, load=False):
"""
Setter method for show_mpls_rsvp, mapped from YANG variable /brocade_mpls_rpc/show_mpls_rsvp (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_show_mpls_rsvp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_show_mpls_rsvp() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=show_mpls_rsvp.show_mpls_rsvp, is_leaf=True, yang_name="show-mpls-rsvp", rest_name="show-mpls-rsvp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'full', u'actionpoint': u'showMplsRsvp'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='rpc', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """show_mpls_rsvp must be of a type compatible with rpc""",
'defined-type': "rpc",
'generated-type': """YANGDynClass(base=show_mpls_rsvp.show_mpls_rsvp, is_leaf=True, yang_name="show-mpls-rsvp", rest_name="show-mpls-rsvp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'full', u'actionpoint': u'showMplsRsvp'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='rpc', is_config=True)""",
})
self.__show_mpls_rsvp = t
if hasattr(self, '_set'):
self._set() |
def as_command(self):
"""Creates the click command wrapping the function
"""
try:
params = self.unbound_func.__click_params__
params.reverse()
del self.unbound_func.__click_params__
except AttributeError:
params = []
help = inspect.getdoc(self.real_func)
if isinstance(help, bytes):
help = help.decode('utf-8')
self.options.setdefault('help', help)
@pass_script_info_decorator
def callback(info, *args, **kwargs):
if self.with_reloader:
app = info.load_app()
if app.debug:
def inner():
return self.command_callback(info, *args, **kwargs)
run_with_reloader(inner, extra_files=get_reloader_extra_files())
return
self.command_callback(info, *args, **kwargs)
return self.cls(name=self.name, callback=callback, params=params, **self.options) | Creates the click command wrapping the function | Below is the instruction that describes the task:
### Input:
Creates the click command wrapping the function
### Response:
def as_command(self):
"""Creates the click command wrapping the function
"""
try:
params = self.unbound_func.__click_params__
params.reverse()
del self.unbound_func.__click_params__
except AttributeError:
params = []
help = inspect.getdoc(self.real_func)
if isinstance(help, bytes):
help = help.decode('utf-8')
self.options.setdefault('help', help)
@pass_script_info_decorator
def callback(info, *args, **kwargs):
if self.with_reloader:
app = info.load_app()
if app.debug:
def inner():
return self.command_callback(info, *args, **kwargs)
run_with_reloader(inner, extra_files=get_reloader_extra_files())
return
self.command_callback(info, *args, **kwargs)
return self.cls(name=self.name, callback=callback, params=params, **self.options) |
def p_expression_ulnot(self, p):
'expression : LNOT expression %prec ULNOT'
p[0] = Ulnot(p[2], lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1)) | expression : LNOT expression %prec ULNOT | Below is the instruction that describes the task:
### Input:
expression : LNOT expression %prec ULNOT
### Response:
def p_expression_ulnot(self, p):
'expression : LNOT expression %prec ULNOT'
p[0] = Ulnot(p[2], lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1)) |
def scale_in(self, blocks=0, machines=0, strategy=None):
''' Scale in resources
'''
count = 0
instances = self.client.servers.list()
for instance in instances[0:machines]:
print("Deleting : ", instance)
instance.delete()
count += 1
return count | Scale in resources | Below is the instruction that describes the task:
### Input:
Scale in resources
### Response:
def scale_in(self, blocks=0, machines=0, strategy=None):
''' Scale in resources
'''
count = 0
instances = self.client.servers.list()
for instance in instances[0:machines]:
print("Deleting : ", instance)
instance.delete()
count += 1
return count |
def serialize(self, value, **kwargs):
"""Serialize instance to JSON
If the value is a HasProperties instance, it is serialized with
the include_class argument passed along. Otherwise, to_json is
called.
"""
kwargs.update({'include_class': kwargs.get('include_class', True)})
if self.serializer is not None:
return self.serializer(value, **kwargs)
if value is None:
return None
if isinstance(value, HasProperties):
return value.serialize(**kwargs)
return self.to_json(value, **kwargs) | Serialize instance to JSON
If the value is a HasProperties instance, it is serialized with
the include_class argument passed along. Otherwise, to_json is
called. | Below is the instruction that describes the task:
### Input:
Serialize instance to JSON
If the value is a HasProperties instance, it is serialized with
the include_class argument passed along. Otherwise, to_json is
called.
### Response:
def serialize(self, value, **kwargs):
"""Serialize instance to JSON
If the value is a HasProperties instance, it is serialized with
the include_class argument passed along. Otherwise, to_json is
called.
"""
kwargs.update({'include_class': kwargs.get('include_class', True)})
if self.serializer is not None:
return self.serializer(value, **kwargs)
if value is None:
return None
if isinstance(value, HasProperties):
return value.serialize(**kwargs)
return self.to_json(value, **kwargs) |
def endure_multi(self, keys, persist_to=-1, replicate_to=-1,
timeout=5.0, interval=0.010, check_removed=False):
"""Check durability requirements for multiple keys
:param keys: The keys to check
The type of keys may be one of the following:
* Sequence of keys
* A :class:`~couchbase.result.MultiResult` object
* A ``dict`` with CAS values as the dictionary value
* A sequence of :class:`~couchbase.result.Result` objects
:return: A :class:`~.MultiResult` object
of :class:`~.OperationResult` items.
.. seealso:: :meth:`endure`
"""
return _Base.endure_multi(self, keys, persist_to=persist_to,
replicate_to=replicate_to,
timeout=timeout, interval=interval,
check_removed=check_removed) | Check durability requirements for multiple keys
:param keys: The keys to check
The type of keys may be one of the following:
* Sequence of keys
* A :class:`~couchbase.result.MultiResult` object
* A ``dict`` with CAS values as the dictionary value
* A sequence of :class:`~couchbase.result.Result` objects
:return: A :class:`~.MultiResult` object
of :class:`~.OperationResult` items.
.. seealso:: :meth:`endure` | Below is the instruction that describes the task:
### Input:
Check durability requirements for multiple keys
:param keys: The keys to check
The type of keys may be one of the following:
* Sequence of keys
* A :class:`~couchbase.result.MultiResult` object
* A ``dict`` with CAS values as the dictionary value
* A sequence of :class:`~couchbase.result.Result` objects
:return: A :class:`~.MultiResult` object
of :class:`~.OperationResult` items.
.. seealso:: :meth:`endure`
### Response:
def endure_multi(self, keys, persist_to=-1, replicate_to=-1,
timeout=5.0, interval=0.010, check_removed=False):
"""Check durability requirements for multiple keys
:param keys: The keys to check
The type of keys may be one of the following:
* Sequence of keys
* A :class:`~couchbase.result.MultiResult` object
* A ``dict`` with CAS values as the dictionary value
* A sequence of :class:`~couchbase.result.Result` objects
:return: A :class:`~.MultiResult` object
of :class:`~.OperationResult` items.
.. seealso:: :meth:`endure`
"""
return _Base.endure_multi(self, keys, persist_to=persist_to,
replicate_to=replicate_to,
timeout=timeout, interval=interval,
check_removed=check_removed) |
def get_source_by_alias(self, alias):
"""Given an alias, find the corresponding source in this entry.
If the given alias doesn't exist (e.g. there are no sources), then a
`ValueError` is raised.
Arguments
---------
alias : str
The str-integer (e.g. '8') of the target source.
Returns
-------
source : `astrocats.catalog.source.Source` object
The source object corresponding to the passed alias.
"""
for source in self.get(self._KEYS.SOURCES, []):
if source[self._KEYS.ALIAS] == alias:
return source
raise ValueError("Source '{}': alias '{}' not found!".format(self[
self._KEYS.NAME], alias)) | Given an alias, find the corresponding source in this entry.
If the given alias doesn't exist (e.g. there are no sources), then a
`ValueError` is raised.
Arguments
---------
alias : str
The str-integer (e.g. '8') of the target source.
Returns
-------
source : `astrocats.catalog.source.Source` object
The source object corresponding to the passed alias. | Below is the instruction that describes the task:
### Input:
Given an alias, find the corresponding source in this entry.
If the given alias doesn't exist (e.g. there are no sources), then a
`ValueError` is raised.
Arguments
---------
alias : str
The str-integer (e.g. '8') of the target source.
Returns
-------
source : `astrocats.catalog.source.Source` object
The source object corresponding to the passed alias.
### Response:
def get_source_by_alias(self, alias):
"""Given an alias, find the corresponding source in this entry.
If the given alias doesn't exist (e.g. there are no sources), then a
`ValueError` is raised.
Arguments
---------
alias : str
The str-integer (e.g. '8') of the target source.
Returns
-------
source : `astrocats.catalog.source.Source` object
The source object corresponding to the passed alias.
"""
for source in self.get(self._KEYS.SOURCES, []):
if source[self._KEYS.ALIAS] == alias:
return source
raise ValueError("Source '{}': alias '{}' not found!".format(self[
self._KEYS.NAME], alias)) |
def get_box_field(self, box_key, field_key = None):
'''Gets one/all field in a box
Args:
box_key key for pipeline
field_key key for field (default: None i.e. ALL)
returns status code, field dict or list thereof
'''
#does not work
self._raise_unimplemented_error()
uri = '/'.join([self.api_uri,
self.boxes_suffix,
box_key,
self.fields_suffix
])
if field_key:
uri = '/'.join([uri, field_key])
return self._req('get', uri) | Gets one/all field in a box
Args:
box_key key for pipeline
field_key key for field (default: None i.e. ALL)
returns status code, field dict or list thereof | Below is the instruction that describes the task:
### Input:
Gets one/all field in a box
Args:
box_key key for pipeline
field_key key for field (default: None i.e. ALL)
returns status code, field dict or list thereof
### Response:
def get_box_field(self, box_key, field_key = None):
'''Gets one/all field in a box
Args:
box_key key for pipeline
field_key key for field (default: None i.e. ALL)
returns status code, field dict or list thereof
'''
#does not work
self._raise_unimplemented_error()
uri = '/'.join([self.api_uri,
self.boxes_suffix,
box_key,
self.fields_suffix
])
if field_key:
uri = '/'.join([uri, field_key])
return self._req('get', uri) |
def _extract(self, raw: str, station: str) -> str: # type: ignore
"""
Extracts the reports message using string finding
"""
report = raw[raw.find(station.upper() + ' '):]
report = report[:report.find(' =')]
return report | Extracts the reports message using string finding | Below is the instruction that describes the task:
### Input:
Extracts the reports message using string finding
### Response:
def _extract(self, raw: str, station: str) -> str: # type: ignore
"""
Extracts the reports message using string finding
"""
report = raw[raw.find(station.upper() + ' '):]
report = report[:report.find(' =')]
return report |
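The two find/slice steps in isolation (a sketch with hypothetical report text):
raw = 'HEADER NOISE KJFK 120651Z 18004KT 10SM FEW250 = TRAILER'
station = 'kjfk'
report = raw[raw.find(station.upper() + ' '):]  # drop everything before the station
report = report[:report.find(' =')]             # drop the ' =' terminator onward
print(report)  # KJFK 120651Z 18004KT 10SM FEW250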
def _validate_region(self):
"""Validate region was passed in and is a valid GCE zone."""
if not self.region:
raise GCECloudException(
'Zone is required for GCE cloud framework: '
'Example: us-west1-a'
)
try:
zone = self.compute_driver.ex_get_zone(self.region)
except Exception:
zone = None
if not zone:
raise GCECloudException(
'{region} is not a valid GCE zone. '
'Example: us-west1-a'.format(
region=self.region
)
) | Validate region was passed in and is a valid GCE zone. | Below is the instruction that describes the task:
### Input:
Validate region was passed in and is a valid GCE zone.
### Response:
def _validate_region(self):
"""Validate region was passed in and is a valid GCE zone."""
if not self.region:
raise GCECloudException(
'Zone is required for GCE cloud framework: '
'Example: us-west1-a'
)
try:
zone = self.compute_driver.ex_get_zone(self.region)
except Exception:
zone = None
if not zone:
raise GCECloudException(
'{region} is not a valid GCE zone. '
'Example: us-west1-a'.format(
region=self.region
)
) |
def save_xml(self, doc, element):
'''Save this configuration data into an xml.dom.Element object.'''
element.setAttributeNS(RTS_NS, RTS_NS_S + 'name', self.name)
if self.data:
element.setAttributeNS(RTS_NS, RTS_NS_S + 'data', self.data) | Save this configuration data into an xml.dom.Element object. | Below is the instruction that describes the task:
### Input:
Save this configuration data into an xml.dom.Element object.
### Response:
def save_xml(self, doc, element):
'''Save this configuration data into an xml.dom.Element object.'''
element.setAttributeNS(RTS_NS, RTS_NS_S + 'name', self.name)
if self.data:
element.setAttributeNS(RTS_NS, RTS_NS_S + 'data', self.data) |
def add_transition_to_state(from_port, to_port):
"""Interface method between Gaphas and RAFCON core for adding transitions
The method checks the types of the given ports (IncomeView or OutcomeView) and from this determines the necessary
parameters for the add_transition method of the RAFCON core. Also the parent state is derived from the ports.
:param from_port: Port from which the transition starts
:param to_port: Port to which the transition goes to
:return: True if a transition was added, False if an error occurred
"""
from rafcon.gui.mygaphas.items.ports import IncomeView, OutcomeView
from_state_v = from_port.parent
to_state_v = to_port.parent
from_state_m = from_state_v.model
to_state_m = to_state_v.model
# Gather necessary information to create transition
from_state_id = from_state_m.state.state_id
to_state_id = to_state_m.state.state_id
responsible_parent_m = None
# Start transition
if isinstance(from_port, IncomeView):
from_state_id = None
from_outcome_id = None
responsible_parent_m = from_state_m
# Transition from parent income to child income
if isinstance(to_port, IncomeView):
to_outcome_id = None
# Transition from parent income to parent outcome
elif isinstance(to_port, OutcomeView):
to_outcome_id = to_port.outcome_id
elif isinstance(from_port, OutcomeView):
from_outcome_id = from_port.outcome_id
# Transition from child outcome to child income
if isinstance(to_port, IncomeView):
responsible_parent_m = from_state_m.parent
to_outcome_id = None
# Transition from child outcome to parent outcome
elif isinstance(to_port, OutcomeView):
responsible_parent_m = to_state_m
to_outcome_id = to_port.outcome_id
else:
raise ValueError("Invalid port type")
from rafcon.gui.models.container_state import ContainerStateModel
if not responsible_parent_m:
logger.error("Transitions only exist between incomes and outcomes. Given: {0} and {1}".format(type(
from_port), type(to_port)))
return False
elif not isinstance(responsible_parent_m, ContainerStateModel):
logger.error("Transitions only exist in container states (e.g. hierarchy states)")
return False
try:
t_id = responsible_parent_m.state.add_transition(from_state_id, from_outcome_id, to_state_id, to_outcome_id)
if from_state_id == to_state_id:
gui_helper_meta_data.insert_self_transition_meta_data(responsible_parent_m.states[from_state_id], t_id,
combined_action=True)
return True
except (ValueError, AttributeError, TypeError) as e:
logger.error("Transition couldn't be added: {0}".format(e))
return False | Interface method between Gaphas and RAFCON core for adding transitions
The method checks the types of the given ports (IncomeView or OutcomeView) and from this determines the necessary
parameters for the add_transition method of the RAFCON core. Also the parent state is derived from the ports.
:param from_port: Port from which the transition starts
:param to_port: Port to which the transition goes to
:return: True if a transition was added, False if an error occurred | Below is the instruction that describes the task:
### Input:
Interface method between Gaphas and RAFCON core for adding transitions
The method checks the types of the given ports (IncomeView or OutcomeView) and from this determines the necessary
parameters for the add_transition method of the RAFCON core. Also the parent state is derived from the ports.
:param from_port: Port from which the transition starts
:param to_port: Port to which the transition goes to
:return: True if a transition was added, False if an error occurred
### Response:
def add_transition_to_state(from_port, to_port):
"""Interface method between Gaphas and RAFCON core for adding transitions
The method checks the types of the given ports (IncomeView or OutcomeView) and from this determines the necessary
parameters for the add_transition method of the RAFCON core. Also the parent state is derived from the ports.
:param from_port: Port from which the transition starts
:param to_port: Port to which the transition goes to
:return: True if a transition was added, False if an error occurred
"""
from rafcon.gui.mygaphas.items.ports import IncomeView, OutcomeView
from_state_v = from_port.parent
to_state_v = to_port.parent
from_state_m = from_state_v.model
to_state_m = to_state_v.model
# Gather necessary information to create transition
from_state_id = from_state_m.state.state_id
to_state_id = to_state_m.state.state_id
responsible_parent_m = None
# Start transition
if isinstance(from_port, IncomeView):
from_state_id = None
from_outcome_id = None
responsible_parent_m = from_state_m
# Transition from parent income to child income
if isinstance(to_port, IncomeView):
to_outcome_id = None
# Transition from parent income to parent outcome
elif isinstance(to_port, OutcomeView):
to_outcome_id = to_port.outcome_id
elif isinstance(from_port, OutcomeView):
from_outcome_id = from_port.outcome_id
# Transition from child outcome to child income
if isinstance(to_port, IncomeView):
responsible_parent_m = from_state_m.parent
to_outcome_id = None
# Transition from child outcome to parent outcome
elif isinstance(to_port, OutcomeView):
responsible_parent_m = to_state_m
to_outcome_id = to_port.outcome_id
else:
raise ValueError("Invalid port type")
from rafcon.gui.models.container_state import ContainerStateModel
if not responsible_parent_m:
logger.error("Transitions only exist between incomes and outcomes. Given: {0} and {1}".format(type(
from_port), type(to_port)))
return False
elif not isinstance(responsible_parent_m, ContainerStateModel):
logger.error("Transitions only exist in container states (e.g. hierarchy states)")
return False
try:
t_id = responsible_parent_m.state.add_transition(from_state_id, from_outcome_id, to_state_id, to_outcome_id)
if from_state_id == to_state_id:
gui_helper_meta_data.insert_self_transition_meta_data(responsible_parent_m.states[from_state_id], t_id,
combined_action=True)
return True
except (ValueError, AttributeError, TypeError) as e:
logger.error("Transition couldn't be added: {0}".format(e))
return False |
def remove_terms_by_indices(self, idx_to_delete_list):
'''
Parameters
----------
idx_to_delete_list, list
Returns
-------
TermDocMatrix
'''
new_X, new_term_idx_store = self._get_X_after_delete_terms(idx_to_delete_list)
return self._make_new_term_doc_matrix(new_X, self._mX, self._y, new_term_idx_store, self._category_idx_store,
self._metadata_idx_store, self._y == self._y) | Parameters
----------
idx_to_delete_list, list
Returns
-------
TermDocMatrix | Below is the instruction that describes the task:
### Input:
Parameters
----------
idx_to_delete_list, list
Returns
-------
TermDocMatrix
### Response:
def remove_terms_by_indices(self, idx_to_delete_list):
'''
Parameters
----------
idx_to_delete_list, list
Returns
-------
TermDocMatrix
'''
new_X, new_term_idx_store = self._get_X_after_delete_terms(idx_to_delete_list)
return self._make_new_term_doc_matrix(new_X, self._mX, self._y, new_term_idx_store, self._category_idx_store,
self._metadata_idx_store, self._y == self._y) |
def get_changes_between_builds(self, project, from_build_id=None, to_build_id=None, top=None):
"""GetChangesBetweenBuilds.
[Preview API] Gets the changes made to the repository between two given builds.
:param str project: Project ID or project name
:param int from_build_id: The ID of the first build.
:param int to_build_id: The ID of the last build.
:param int top: The maximum number of changes to return.
:rtype: [Change]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if from_build_id is not None:
query_parameters['fromBuildId'] = self._serialize.query('from_build_id', from_build_id, 'int')
if to_build_id is not None:
query_parameters['toBuildId'] = self._serialize.query('to_build_id', to_build_id, 'int')
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
response = self._send(http_method='GET',
location_id='f10f0ea5-18a1-43ec-a8fb-2042c7be9b43',
version='5.0-preview.2',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[Change]', self._unwrap_collection(response)) | GetChangesBetweenBuilds.
[Preview API] Gets the changes made to the repository between two given builds.
:param str project: Project ID or project name
:param int from_build_id: The ID of the first build.
:param int to_build_id: The ID of the last build.
:param int top: The maximum number of changes to return.
:rtype: [Change] | Below is the instruction that describes the task:
### Input:
GetChangesBetweenBuilds.
[Preview API] Gets the changes made to the repository between two given builds.
:param str project: Project ID or project name
:param int from_build_id: The ID of the first build.
:param int to_build_id: The ID of the last build.
:param int top: The maximum number of changes to return.
:rtype: [Change]
### Response:
def get_changes_between_builds(self, project, from_build_id=None, to_build_id=None, top=None):
"""GetChangesBetweenBuilds.
[Preview API] Gets the changes made to the repository between two given builds.
:param str project: Project ID or project name
:param int from_build_id: The ID of the first build.
:param int to_build_id: The ID of the last build.
:param int top: The maximum number of changes to return.
:rtype: [Change]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if from_build_id is not None:
query_parameters['fromBuildId'] = self._serialize.query('from_build_id', from_build_id, 'int')
if to_build_id is not None:
query_parameters['toBuildId'] = self._serialize.query('to_build_id', to_build_id, 'int')
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
response = self._send(http_method='GET',
location_id='f10f0ea5-18a1-43ec-a8fb-2042c7be9b43',
version='5.0-preview.2',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[Change]', self._unwrap_collection(response)) |
def command(self, profile=False):
"""This is a Flask-Script like decorator, provide functionality like
@manager.command
def foo():
pass
@manager.command
def foo(first_arg, second_arg, first_option=True, second_option=3):
pass
"""
def wraped(func):
assert inspect.isfunction(func)
self._commands[func.__name__] = Command(func, profile)
self._commands_list.append(func.__name__)
return func
return wraped | This is a Flask-Script-like decorator, providing functionality like
@manager.command
def foo():
pass
@manager.command
def foo(first_arg, second_arg, first_option=True, second_option=3):
pass | Below is the instruction that describes the task:
### Input:
This is a Flask-Script-like decorator, providing functionality like
@manager.command
def foo():
pass
@manager.command
def foo(first_arg, second_arg, first_option=True, second_option=3):
pass
### Response:
def command(self, profile=False):
"""This is a Flask-Script like decorator, provide functionality like
@manager.command
def foo():
pass
@manager.command
def foo(first_arg, second_arg, first_option=True, second_option=3):
pass
"""
def wraped(func):
assert inspect.isfunction(func)
self._commands[func.__name__] = Command(func, profile)
self._commands_list.append(func.__name__)
return func
return wraped |
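A stripped-down sketch of the registration pattern, with a hypothetical Command stand-in (the real class is not shown in this entry). Note that because command() takes arguments, call sites need parentheses, i.e. @manager.command() rather than the bare @manager.command shown in the docstring:
import inspect
class Command(object):
    def __init__(self, func, profile):
        self.func, self.profile = func, profile
class Manager(object):
    def __init__(self):
        self._commands = {}
        self._commands_list = []
    def command(self, profile=False):
        def wraped(func):
            assert inspect.isfunction(func)
            self._commands[func.__name__] = Command(func, profile)
            self._commands_list.append(func.__name__)
            return func
        return wraped
manager = Manager()
@manager.command(profile=True)
def foo(first_arg, second_arg, first_option=True):
    pass
print(manager._commands_list)  # ['foo']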
def attach(self, canvas):
"""Attach this interact to a canvas."""
super(PanZoom, self).attach(canvas)
canvas.panzoom = self
canvas.transforms.add_on_gpu([self._translate, self._scale])
# Add the variable declarations.
vs = ('uniform vec2 {};\n'.format(self.pan_var_name) +
'uniform vec2 {};\n'.format(self.zoom_var_name))
canvas.inserter.insert_vert(vs, 'header')
canvas.connect(self.on_resize)
canvas.connect(self.on_mouse_move)
canvas.connect(self.on_touch)
canvas.connect(self.on_key_press)
if self.enable_mouse_wheel:
canvas.connect(self.on_mouse_wheel)
self._set_canvas_aspect() | Attach this interact to a canvas. | Below is the instruction that describes the task:
### Input:
Attach this interact to a canvas.
### Response:
def attach(self, canvas):
"""Attach this interact to a canvas."""
super(PanZoom, self).attach(canvas)
canvas.panzoom = self
canvas.transforms.add_on_gpu([self._translate, self._scale])
# Add the variable declarations.
vs = ('uniform vec2 {};\n'.format(self.pan_var_name) +
'uniform vec2 {};\n'.format(self.zoom_var_name))
canvas.inserter.insert_vert(vs, 'header')
canvas.connect(self.on_resize)
canvas.connect(self.on_mouse_move)
canvas.connect(self.on_touch)
canvas.connect(self.on_key_press)
if self.enable_mouse_wheel:
canvas.connect(self.on_mouse_wheel)
self._set_canvas_aspect() |
def get_body_hash(params):
"""
Returns BASE64 ( HASH (text) ) as described in OAuth2 MAC spec.
http://tools.ietf.org/html/draft-ietf-oauth-v2-http-mac-00#section-3.2
"""
norm_params = get_normalized_params(params)
return binascii.b2a_base64(hashlib.sha1(norm_params).digest())[:-1] | Returns BASE64 ( HASH (text) ) as described in OAuth2 MAC spec.
http://tools.ietf.org/html/draft-ietf-oauth-v2-http-mac-00#section-3.2 | Below is the instruction that describes the task:
### Input:
Returns BASE64 ( HASH (text) ) as described in OAuth2 MAC spec.
http://tools.ietf.org/html/draft-ietf-oauth-v2-http-mac-00#section-3.2
### Response:
def get_body_hash(params):
"""
Returns BASE64 ( HASH (text) ) as described in OAuth2 MAC spec.
http://tools.ietf.org/html/draft-ietf-oauth-v2-http-mac-00#section-3.2
"""
norm_params = get_normalized_params(params)
return binascii.b2a_base64(hashlib.sha1(norm_params).digest())[:-1] |
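For illustration, the same BASE64(SHA-1(...)) step with a plausible stand-in for get_normalized_params, which is not part of this entry:
import binascii
import hashlib
def get_normalized_params(params):
    # Hypothetical normalization: sorted key=value pairs joined by '&'
    return '&'.join('%s=%s' % (k, params[k]) for k in sorted(params)).encode()
params = {'b': '2', 'a': '1'}
norm = get_normalized_params(params)  # b'a=1&b=2'
body_hash = binascii.b2a_base64(hashlib.sha1(norm).digest())[:-1]
print(body_hash)  # base64 of the SHA-1 digest, trailing newline stripped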
def parse_asf(source, world, jointgroup=None, density=1000, color=None):
'''Load and parse a source file.
Parameters
----------
source : file
A file-like object that contains text information describing bodies and
joints to add to the world.
world : :class:`pagoda.physics.World`
The world to add objects and joints to.
jointgroup : ode.JointGroup, optional
If provided, add all joints from this parse to the given group. The
default behavior adds joints to the world without an explicit group.
density : float, optional
Default density for bodies. This is overridden if the source provides a
density or mass value for a given body.
color : tuple of floats, optional
Default color for bodies from this source. Defaults to None, which does
not assign a color to parsed bodies.
'''
visitor = AsfVisitor(world, jointgroup, density, color)
visitor.parse(re.sub(r'#.*', ' ', source.read()))
return visitor | Load and parse a source file.
Parameters
----------
source : file
A file-like object that contains text information describing bodies and
joints to add to the world.
world : :class:`pagoda.physics.World`
The world to add objects and joints to.
jointgroup : ode.JointGroup, optional
If provided, add all joints from this parse to the given group. The
default behavior adds joints to the world without an explicit group.
density : float, optional
Default density for bodies. This is overridden if the source provides a
density or mass value for a given body.
color : tuple of floats, optional
Default color for bodies from this source. Defaults to None, which does
not assign a color to parsed bodies. | Below is the instruction that describes the task:
### Input:
Load and parse a source file.
Parameters
----------
source : file
A file-like object that contains text information describing bodies and
joints to add to the world.
world : :class:`pagoda.physics.World`
The world to add objects and joints to.
jointgroup : ode.JointGroup, optional
If provided, add all joints from this parse to the given group. The
default behavior adds joints to the world without an explicit group.
density : float, optional
Default density for bodies. This is overridden if the source provides a
density or mass value for a given body.
color : tuple of floats, optional
Default color for bodies from this source. Defaults to None, which does
not assign a color to parsed bodies.
### Response:
def parse_asf(source, world, jointgroup=None, density=1000, color=None):
'''Load and parse a source file.
Parameters
----------
source : file
A file-like object that contains text information describing bodies and
joints to add to the world.
world : :class:`pagoda.physics.World`
The world to add objects and joints to.
jointgroup : ode.JointGroup, optional
If provided, add all joints from this parse to the given group. The
default behavior adds joints to the world without an explicit group.
density : float, optional
Default density for bodies. This is overridden if the source provides a
density or mass value for a given body.
color : tuple of floats, optional
Default color for bodies from this source. Defaults to None, which does
not assign a color to parsed bodies.
'''
visitor = AsfVisitor(world, jointgroup, density, color)
visitor.parse(re.sub(r'#.*', ' ', source.read()))
return visitor |
def bitwise_or(self, t):
"""
Binary operation: logical or
:param b: The other operand
:return: self | b
"""
"""
This implementation combines the approaches used by 'WYSINWYX: what you see is not what you execute'
paper and 'Signedness-Agnostic Program Analysis: Precise Integer Bounds for Low-Level Code'. The
first paper provides a sound way to approximate the stride, whereas the second provides a way
to calculate the or operation using wrapping intervals.
Note that, even though according to Warren's 'Hacker's Delight' one should follow different
approaches to calculate the minimum/maximum values of an or operation depending on the type
of the operands (signed/unsigned), by splitting the wrapping-intervals
at the south pole we can safely and soundly use only Warren's functions for unsigned
integers.
"""
s = self
result_interval = list()
for u in s._ssplit():
for v in t._ssplit():
w = u.bits
# u |w v
if u.is_integer:
s_t = StridedInterval._ntz(v.stride)
elif v.is_integer:
s_t = StridedInterval._ntz(u.stride)
else:
s_t = min(StridedInterval._ntz(u.stride), StridedInterval._ntz(v.stride))
if u.is_integer and u.lower_bound == 0:
new_stride = v.stride
elif v.is_integer and v.lower_bound == 0:
new_stride = u.stride
else:
new_stride = 2 ** s_t
mask = (1 << s_t) - 1
r = (u.lower_bound & mask) | (v.lower_bound & mask)
m = (2 ** w) - 1
low_bound = WarrenMethods.min_or(u.lower_bound & (~mask & m), u.upper_bound & (~mask & m), v.lower_bound & (~mask & m), v.upper_bound & (~mask & m), w)
upper_bound = WarrenMethods.max_or(u.lower_bound & (~mask & m), u.upper_bound & (~mask & m), v.lower_bound & (~mask & m), v.upper_bound & (~mask & m), w)
if low_bound == upper_bound:
new_stride = 0
new_interval = StridedInterval(lower_bound=((low_bound & (~mask & m)) | r), upper_bound=((upper_bound & (~mask & m)) | r), bits=w, stride=new_stride)
result_interval.append(new_interval)
return StridedInterval.least_upper_bound(*result_interval).normalize() | Binary operation: logical or
:param b: The other operand
:return: self | b | Below is the instruction that describes the task:
### Input:
Binary operation: logical or
:param b: The other operand
:return: self | b
### Response:
def bitwise_or(self, t):
"""
Binary operation: logical or
:param b: The other operand
:return: self | b
"""
"""
This implementation combines the approaches used by 'WYSINWYX: what you see is not what you execute'
paper and 'Signedness-Agnostic Program Analysis: Precise Integer Bounds for Low-Level Code'. The
first paper provides a sound way to approximate the stride, whereas the second provides a way
to calculate the or operation using wrapping intervals.
Note that, even though according to Warren's 'Hacker's Delight' one should follow different
approaches to calculate the minimum/maximum values of an or operation depending on the type
of the operands (signed/unsigned), by splitting the wrapping-intervals
at the south pole we can safely and soundly use only Warren's functions for unsigned
integers.
"""
s = self
result_interval = list()
for u in s._ssplit():
for v in t._ssplit():
w = u.bits
# u |w v
if u.is_integer:
s_t = StridedInterval._ntz(v.stride)
elif v.is_integer:
s_t = StridedInterval._ntz(u.stride)
else:
s_t = min(StridedInterval._ntz(u.stride), StridedInterval._ntz(v.stride))
if u.is_integer and u.lower_bound == 0:
new_stride = v.stride
elif v.is_integer and v.lower_bound == 0:
new_stride = u.stride
else:
new_stride = 2 ** s_t
mask = (1 << s_t) - 1
r = (u.lower_bound & mask) | (v.lower_bound & mask)
m = (2 ** w) - 1
low_bound = WarrenMethods.min_or(u.lower_bound & (~mask & m), u.upper_bound & (~mask & m), v.lower_bound & (~mask & m), v.upper_bound & (~mask & m), w)
upper_bound = WarrenMethods.max_or(u.lower_bound & (~mask & m), u.upper_bound & (~mask & m), v.lower_bound & (~mask & m), v.upper_bound & (~mask & m), w)
if low_bound == upper_bound:
new_stride = 0
new_interval = StridedInterval(lower_bound=((low_bound & (~mask & m)) | r), upper_bound=((upper_bound & (~mask & m)) | r), bits=w, stride=new_stride)
result_interval.append(new_interval)
return StridedInterval.least_upper_bound(*result_interval).normalize() |
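The WarrenMethods bounds referenced above follow Hacker's Delight, section 4-3. A direct transcription for unsigned w-bit intervals, offered as a sketch of what those helpers compute:
def min_or(a, b, c, d, w):
    # Smallest x | y with x in [a, b] and y in [c, d], all unsigned w-bit
    m = 1 << (w - 1)
    while m:
        if ~a & c & m:
            t = (a | m) & -m
            if t <= b:
                a = t
                break
        elif a & ~c & m:
            t = (c | m) & -m
            if t <= d:
                c = t
                break
        m >>= 1
    return a | c
def max_or(a, b, c, d, w):
    # Largest x | y with x in [a, b] and y in [c, d], all unsigned w-bit
    m = 1 << (w - 1)
    while m:
        if b & d & m:
            t = (b - m) | (m - 1)
            if t >= a:
                b = t
                break
            t = (d - m) | (m - 1)
            if t >= c:
                d = t
                break
        m >>= 1
    return b | d
print(min_or(0b0010, 0b0101, 0b0001, 0b0011, 4))  # 2
print(max_or(0b0010, 0b0101, 0b0001, 0b0011, 4))  # 7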
def get_class(class_string):
"""
Get a class from a dotted string
"""
split_string = class_string.encode('ascii').split('.')
import_path = '.'.join(split_string[:-1])
class_name = split_string[-1]
if class_name:
try:
if import_path:
mod = __import__(import_path, globals(), {}, [class_name])
cls = getattr(mod, class_name)
else:
cls = __import__(class_name, globals(), {})
if cls:
return cls
except (ImportError, AttributeError):
pass
return None | Get a class from a dotted string | Below is the instruction that describes the task:
### Input:
Get a class from a dotted string
### Response:
def get_class(class_string):
"""
Get a class from a dotted string
"""
split_string = class_string.encode('ascii').split('.')
import_path = '.'.join(split_string[:-1])
class_name = split_string[-1]
if class_name:
try:
if import_path:
mod = __import__(import_path, globals(), {}, [class_name])
cls = getattr(mod, class_name)
else:
cls = __import__(class_name, globals(), {})
if cls:
return cls
except (ImportError, AttributeError):
pass
return None |
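Example usage (under Python 2, where the ascii-encode step still yields a str this code can split):
cls = get_class('collections.OrderedDict')
# -> <class 'collections.OrderedDict'>, or None if the import/lookup fails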
def classifiers(self):
"""
Returns the list of base classifiers.
:return: the classifier list
:rtype: list
"""
objects = javabridge.get_env().get_object_array_elements(
javabridge.call(self.jobject, "getClassifiers", "()[Lweka/classifiers/Classifier;"))
result = []
for obj in objects:
result.append(Classifier(jobject=obj))
return result | Returns the list of base classifiers.
:return: the classifier list
:rtype: list | Below is the instruction that describes the task:
### Input:
Returns the list of base classifiers.
:return: the classifier list
:rtype: list
### Response:
def classifiers(self):
"""
Returns the list of base classifiers.
:return: the classifier list
:rtype: list
"""
objects = javabridge.get_env().get_object_array_elements(
javabridge.call(self.jobject, "getClassifiers", "()[Lweka/classifiers/Classifier;"))
result = []
for obj in objects:
result.append(Classifier(jobject=obj))
return result |
def save(self, *args, **kwargs):
"""
Custom save method
"""
# change status to scheduled if necessary
if self.is_scheduled and self.status is not OUTWARD_STATUS.get('scheduled'):
self.status = OUTWARD_STATUS.get('scheduled')
# call super.save()
super(Outward, self).save(*args, **kwargs) | Custom save method | Below is the instruction that describes the task:
### Input:
Custom save method
### Response:
def save(self, *args, **kwargs):
"""
Custom save method
"""
# change status to scheduled if necessary
if self.is_scheduled and self.status is not OUTWARD_STATUS.get('scheduled'):
self.status = OUTWARD_STATUS.get('scheduled')
# call super.save()
super(Outward, self).save(*args, **kwargs) |
def update_collisions(self):
"""
Test player for collisions with items
"""
if not self.mode['items'] or len(self.mode['items']) == 0: return
# update collman
# FIXME: Why update each frame?
self.collman.clear()
for z, node in self.children:
if hasattr(node, 'cshape') and type(node.cshape) == cm.CircleShape:
self.collman.add(node)
# interactions player - others
for other in self.collman.iter_colliding(self.player):
typeball = other.btype
self.logger.debug('collision', typeball)
# TODO: Limit player position on non-removable items
#if not other.removable:
# pass
if other.removable:
self.to_remove.append(other)
self.reward_item(typeball)
#
# elif (typeball == 'wall' or
# typeball == 'gate' and self.cnt_food > 0):
# self.level_losed()
#
# elif typeball == 'gate':
# self.level_conquered()
self.remove_items() | Test player for collisions with items | Below is the instruction that describes the task:
### Input:
Test player for collisions with items
### Response:
def update_collisions(self):
"""
Test player for collisions with items
"""
if not self.mode['items'] or len(self.mode['items']) == 0: return
# update collman
# FIXME: Why update each frame?
self.collman.clear()
for z, node in self.children:
if hasattr(node, 'cshape') and type(node.cshape) == cm.CircleShape:
self.collman.add(node)
# interactions player - others
for other in self.collman.iter_colliding(self.player):
typeball = other.btype
self.logger.debug('collision', typeball)
# TODO: Limit player position on non-removable items
#if not other.removable:
# pass
if other.removable:
self.to_remove.append(other)
self.reward_item(typeball)
#
# elif (typeball == 'wall' or
# typeball == 'gate' and self.cnt_food > 0):
# self.level_losed()
#
# elif typeball == 'gate':
# self.level_conquered()
self.remove_items() |
def get_mx_records(domain):
"""
Gets an array of MXRecords associated to the domain specified.
:param domain:
:return: [MXRecord]
"""
import dns.resolver
response = dns.resolver.query(domain, 'MX')
mx_records = []
for answer in response:  # dnspython Answer objects are iterated directly; there is no .answers attribute
mx_records.append(MXRecord(priority=answer.preference, exchange=answer.exchange, domain=domain))
return sorted(mx_records, key=lambda record: record.priority) | Gets an array of MXRecords associated to the domain specified.
:param domain:
:return: [MXRecord] | Below is the instruction that describes the task:
### Input:
Gets an array of MXRecords associated to the domain specified.
:param domain:
:return: [MXRecord]
### Response:
def get_mx_records(domain):
"""
Gets an array of MXRecords associated to the domain specified.
:param domain:
:return: [MXRecord]
"""
import dns.resolver
response = dns.resolver.query(domain, 'MX')
mx_records = []
for answer in response:  # dnspython Answer objects are iterated directly; there is no .answers attribute
mx_records.append(MXRecord(priority=answer.preference, exchange=answer.exchange, domain=domain))
return sorted(mx_records, key=lambda record: record.priority) |
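A hypothetical run (requires dnspython and network access; MXRecord is whatever record type the surrounding module defines, which this entry does not show):
for record in get_mx_records('example.org'):
    print(record.priority, record.exchange)  # sorted by ascending priority value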
def draw(self):
"""
Draws the plot to screen.
Note to self: Do NOT call super(MatrixPlot, self).draw(); the
underlying logic for drawing here is completely different from other
plots, and as such necessitates a different implementation.
"""
matrix = nx.to_numpy_matrix(self.graph, nodelist=self.nodes)
self.ax.matshow(matrix, cmap=self.cmap) | Draws the plot to screen.
Note to self: Do NOT call super(MatrixPlot, self).draw(); the
underlying logic for drawing here is completely different from other
plots, and as such necessitates a different implementation. | Below is the instruction that describes the task:
### Input:
Draws the plot to screen.
Note to self: Do NOT call super(MatrixPlot, self).draw(); the
underlying logic for drawing here is completely different from other
plots, and as such necessitates a different implementation.
### Response:
def draw(self):
"""
Draws the plot to screen.
Note to self: Do NOT call super(MatrixPlot, self).draw(); the
underlying logic for drawing here is completely different from other
plots, and as such necessitates a different implementation.
"""
matrix = nx.to_numpy_matrix(self.graph, nodelist=self.nodes)
self.ax.matshow(matrix, cmap=self.cmap) |
def decode(self, bytes, raw=False):
"""decode(bytearray, raw=False) -> value
Decodes the given bytearray and returns the number of
(fractional) seconds.
If the optional parameter ``raw`` is ``True``, the byte (U8)
itself will be returned.
"""
result = super(Time8Type, self).decode(bytes)
if not raw:
result /= 256.0
return result | decode(bytearray, raw=False) -> value
Decodes the given bytearray and returns the number of
(fractional) seconds.
If the optional parameter ``raw`` is ``True``, the byte (U8)
itself will be returned. | Below is the instruction that describes the task:
### Input:
decode(bytearray, raw=False) -> value
Decodes the given bytearray and returns the number of
(fractional) seconds.
If the optional parameter ``raw`` is ``True``, the byte (U8)
itself will be returned.
### Response:
def decode(self, bytes, raw=False):
"""decode(bytearray, raw=False) -> value
Decodes the given bytearray and returns the number of
(fractional) seconds.
If the optional parameter ``raw`` is ``True``, the byte (U8)
itself will be returned.
"""
result = super(Time8Type, self).decode(bytes)
if not raw:
result /= 256.0
return result |
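The arithmetic is simply a U8 read as 1/256ths of a second, for example:
raw = 0x80
seconds = raw / 256.0
print(seconds)  # 0.5 -- and 0xFF decodes to 255/256.0 = 0.99609375, the maximum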
def register(cls, name, description, options_registrar_cls=None):
"""Register a goal description.
Otherwise the description must be set when registering some task on the goal,
which is clunky, and dependent on things like registration order of tasks in the goal.
A goal that isn't explicitly registered with a description will fall back to the description
of the task in that goal with the same name (if any). So singleton goals (e.g., 'clean-all')
need not be registered explicitly. This method is primarily useful for setting a
description on a generic goal like 'compile' or 'test', that multiple backends will
register tasks on.
:API: public
:param string name: The name of the goal; ie: the way to specify it on the command line.
:param string description: A description of what the tasks in the goal do.
:param :class:pants.option.Optionable options_registrar_cls: A class for registering options
at the goal scope. Useful for registering recursive options on all tasks in a goal.
:return: The freshly registered goal.
:rtype: :class:`_Goal`
"""
goal = cls.by_name(name)
goal._description = description
goal._options_registrar_cls = (options_registrar_cls.registrar_for_scope(name)
if options_registrar_cls else None)
return goal | Register a goal description.
Otherwise the description must be set when registering some task on the goal,
which is clunky, and dependent on things like registration order of tasks in the goal.
A goal that isn't explicitly registered with a description will fall back to the description
of the task in that goal with the same name (if any). So singleton goals (e.g., 'clean-all')
need not be registered explicitly. This method is primarily useful for setting a
description on a generic goal like 'compile' or 'test', that multiple backends will
register tasks on.
:API: public
:param string name: The name of the goal; ie: the way to specify it on the command line.
:param string description: A description of what the tasks in the goal do.
:param :class:pants.option.Optionable options_registrar_cls: A class for registering options
at the goal scope. Useful for registering recursive options on all tasks in a goal.
:return: The freshly registered goal.
:rtype: :class:`_Goal` | Below is the instruction that describes the task:
### Input:
Register a goal description.
Otherwise the description must be set when registering some task on the goal,
which is clunky, and dependent on things like registration order of tasks in the goal.
A goal that isn't explicitly registered with a description will fall back to the description
of the task in that goal with the same name (if any). So singleton goals (e.g., 'clean-all')
need not be registered explicitly. This method is primarily useful for setting a
description on a generic goal like 'compile' or 'test', that multiple backends will
register tasks on.
:API: public
:param string name: The name of the goal; ie: the way to specify it on the command line.
:param string description: A description of what the tasks in the goal do.
:param :class:pants.option.Optionable options_registrar_cls: A class for registering options
at the goal scope. Useful for registering recursive options on all tasks in a goal.
:return: The freshly registered goal.
:rtype: :class:`_Goal`
### Response:
def register(cls, name, description, options_registrar_cls=None):
"""Register a goal description.
Otherwise the description must be set when registering some task on the goal,
which is clunky, and dependent on things like registration order of tasks in the goal.
A goal that isn't explicitly registered with a description will fall back to the description
of the task in that goal with the same name (if any). So singleton goals (e.g., 'clean-all')
need not be registered explicitly. This method is primarily useful for setting a
description on a generic goal like 'compile' or 'test', that multiple backends will
register tasks on.
:API: public
:param string name: The name of the goal; ie: the way to specify it on the command line.
:param string description: A description of what the tasks in the goal do.
:param :class:pants.option.Optionable options_registrar_cls: A class for registering options
at the goal scope. Useful for registering recursive options on all tasks in a goal.
:return: The freshly registered goal.
:rtype: :class:`_Goal`
"""
goal = cls.by_name(name)
goal._description = description
goal._options_registrar_cls = (options_registrar_cls.registrar_for_scope(name)
if options_registrar_cls else None)
return goal |
def loadRecord(self, domain, rtype, callback=None, errback=None):
"""
Load a high level Record object from a domain within this Zone.
:param str domain: The name of the record to load
:param str rtype: The DNS record type
:rtype: ns1.records.Record
:return: new Record
"""
rec = Record(self, domain, rtype)
return rec.load(callback=callback, errback=errback) | Load a high level Record object from a domain within this Zone.
:param str domain: The name of the record to load
:param str rtype: The DNS record type
:rtype: ns1.records.Record
:return: new Record | Below is the instruction that describes the task:
### Input:
Load a high level Record object from a domain within this Zone.
:param str domain: The name of the record to load
:param str rtype: The DNS record type
:rtype: ns1.records.Record
:return: new Record
### Response:
def loadRecord(self, domain, rtype, callback=None, errback=None):
"""
Load a high level Record object from a domain within this Zone.
:param str domain: The name of the record to load
:param str rtype: The DNS record type
:rtype: ns1.records.Record
:return: new Record
"""
rec = Record(self, domain, rtype)
return rec.load(callback=callback, errback=errback) |
def p_always_comb(self, p):
'always_comb : ALWAYS_COMB senslist always_statement'
p[0] = AlwaysComb(p[2], p[3], lineno=p.lineno(1)) | always_comb : ALWAYS_COMB senslist always_statement | Below is the instruction that describes the task:
### Input:
always_comb : ALWAYS_COMB senslist always_statement
### Response:
def p_always_comb(self, p):
'always_comb : ALWAYS_COMB senslist always_statement'
p[0] = AlwaysComb(p[2], p[3], lineno=p.lineno(1)) |
def _validate_type_key(key, value, types, validated):
"""Validate a key's value by type."""
for key_schema, value_schema in types.items():
if not isinstance(key, key_schema):
continue
try:
validated[key] = value_schema(value)
except NotValid:
continue
else:
return []
return ['%r: %r not matched' % (key, value)] | Validate a key's value by type. | Below is the instruction that describes the task:
### Input:
Validate a key's value by type.
### Response:
def _validate_type_key(key, value, types, validated):
"""Validate a key's value by type."""
for key_schema, value_schema in types.items():
if not isinstance(key, key_schema):
continue
try:
validated[key] = value_schema(value)
except NotValid:
continue
else:
return []
return ['%r: %r not matched' % (key, value)] |
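A sketch of how such a type-keyed schema might be driven (the surrounding framework, including the NotValid exception, is assumed to exist but is not shown in this entry):
types = {str: int, int: str}  # e.g. str keys must carry int-coercible values
validated = {}
errors = _validate_type_key('age', '42', types, validated)
print(errors, validated)  # [] {'age': 42}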
def load_contents(self):
"""
Loads contents of the tables into database.
"""
with open(METADATA_FILE) as f:
lines = f.readlines()
lines = map(lambda x: x.strip(), lines)
exclude_strings = ['<begin_table>', '<end_table>']
list_of_databases_and_columns = filter(
lambda x: not x[0] in exclude_strings, [
list(value) for key, value in itertools.groupby(
lines,
lambda x: x in exclude_strings
)
]
)
for iterator in list_of_databases_and_columns:
self.create_table_raw(
tablename=iterator[0],
columns=iterator[1:][:],
)
for i in self.tables:
i.load_contents() | Loads contents of the tables into database. | Below is the instruction that describes the task:
### Input:
Loads contents of the tables into database.
### Response:
def load_contents(self):
"""
Loads contents of the tables into database.
"""
with open(METADATA_FILE) as f:
lines = f.readlines()
lines = map(lambda x: x.strip(), lines)
exclude_strings = ['<begin_table>', '<end_table>']
list_of_databases_and_columns = filter(
lambda x: not x[0] in exclude_strings, [
list(value) for key, value in itertools.groupby(
lines,
lambda x: x in exclude_strings
)
]
)
for iterator in list_of_databases_and_columns:
self.create_table_raw(
tablename=iterator[0],
columns=iterator[1:][:],
)
for i in self.tables:
i.load_contents() |
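The groupby/filter pipeline above splits the flat metadata file on its marker lines; the same trick in isolation:
import itertools
lines = ['<begin_table>', 'users', 'id', 'name', '<end_table>',
         '<begin_table>', 'orders', 'id', 'total', '<end_table>']
exclude_strings = ['<begin_table>', '<end_table>']
# groupby clusters consecutive marker/non-marker lines; filter drops the markers
tables = filter(
    lambda x: not x[0] in exclude_strings, [
        list(value) for key, value in itertools.groupby(
            lines, lambda x: x in exclude_strings)
    ]
)
print(list(tables))  # [['users', 'id', 'name'], ['orders', 'id', 'total']]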
def run_queries(self, backfill_num_days=7):
"""
Run the data queries for the specified projects.
:param backfill_num_days: number of days of historical data to backfill,
if missing
:type backfill_num_days: int
"""
available_tables = self._get_download_table_ids()
logger.debug('Found %d available download tables: %s',
len(available_tables), available_tables)
today_table = available_tables[-1]
yesterday_table = available_tables[-2]
self.query_one_table(today_table)
self.query_one_table(yesterday_table)
self.backfill_history(backfill_num_days, available_tables) | Run the data queries for the specified projects.
:param backfill_num_days: number of days of historical data to backfill,
if missing
:type backfill_num_days: int | Below is the instruction that describes the task:
### Input:
Run the data queries for the specified projects.
:param backfill_num_days: number of days of historical data to backfill,
if missing
:type backfill_num_days: int
### Response:
def run_queries(self, backfill_num_days=7):
"""
Run the data queries for the specified projects.
:param backfill_num_days: number of days of historical data to backfill,
if missing
:type backfill_num_days: int
"""
available_tables = self._get_download_table_ids()
logger.debug('Found %d available download tables: %s',
len(available_tables), available_tables)
today_table = available_tables[-1]
yesterday_table = available_tables[-2]
self.query_one_table(today_table)
self.query_one_table(yesterday_table)
self.backfill_history(backfill_num_days, available_tables) |
def validate_url(self, url):
"""Validate the :class:`~urllib.parse.ParseResult` object.
This method will make sure the :meth:`~brownant.app.BrownAnt.parse_url`
could work as expected even when it meets an unexpected URL string.
:param url: the parsed url.
:type url: :class:`~urllib.parse.ParseResult`
"""
# fix up the non-ascii path
url_path = to_bytes_safe(url.path)
url_path = urllib.parse.quote(url_path, safe=b"/%")
# fix up the non-ascii query
url_query = to_bytes_safe(url.query)
url_query = urllib.parse.quote(url_query, safe=b"?=&")
url = urllib.parse.ParseResult(url.scheme, url.netloc, url_path,
url.params, url_query, url.fragment)
# validate the components of URL
has_hostname = url.hostname is not None and len(url.hostname) > 0
has_http_scheme = url.scheme in ("http", "https")
has_path = not len(url.path) or url.path.startswith("/")
if not (has_hostname and has_http_scheme and has_path):
raise NotSupported("invalid url: %s" % repr(url))
return url | Validate the :class:`~urllib.parse.ParseResult` object.
This method will make sure the :meth:`~brownant.app.BrownAnt.parse_url`
could work as expected even when it meets an unexpected URL string.
:param url: the parsed url.
:type url: :class:`~urllib.parse.ParseResult` | Below is the instruction that describes the task:
### Input:
Validate the :class:`~urllib.parse.ParseResult` object.
This method will make sure the :meth:`~brownant.app.BrownAnt.parse_url`
could work as expected even when it meets an unexpected URL string.
:param url: the parsed url.
:type url: :class:`~urllib.parse.ParseResult`
### Response:
def validate_url(self, url):
"""Validate the :class:`~urllib.parse.ParseResult` object.
This method will make sure the :meth:`~brownant.app.BrownAnt.parse_url`
could work as expected even meet a unexpected URL string.
:param url: the parsed url.
:type url: :class:`~urllib.parse.ParseResult`
"""
# fix up the non-ascii path
url_path = to_bytes_safe(url.path)
url_path = urllib.parse.quote(url_path, safe=b"/%")
# fix up the non-ascii query
url_query = to_bytes_safe(url.query)
url_query = urllib.parse.quote(url_query, safe=b"?=&")
url = urllib.parse.ParseResult(url.scheme, url.netloc, url_path,
url.params, url_query, url.fragment)
# validate the components of URL
has_hostname = url.hostname is not None and len(url.hostname) > 0
has_http_scheme = url.scheme in ("http", "https")
has_path = not len(url.path) or url.path.startswith("/")
if not (has_hostname and has_http_scheme and has_path):
raise NotSupported("invalid url: %s" % repr(url))
return url |
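The quoting step above can be exercised on its own. A minimal sketch, assuming to_bytes_safe is effectively a UTF-8 encoder for str input (the example URL is made up):
import urllib.parse

parts = urllib.parse.urlparse("http://example.com/caf\u00e9?q=\u00fc")
path = urllib.parse.quote(parts.path.encode("utf-8"), safe=b"/%")
query = urllib.parse.quote(parts.query.encode("utf-8"), safe=b"?=&")
print(path, query)  # /caf%C3%A9 q=%C3%BC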
def dehydrate(self, iterator):
"""
Pass in an iterator of tweets' JSON and get back an iterator of the
IDs of each tweet.
"""
for line in iterator:
try:
yield json.loads(line)['id_str']
except Exception as e:
log.error("uhoh: %s\n" % e) | Pass in an iterator of tweets' JSON and get back an iterator of the
IDs of each tweet. | Below is the instruction that describes the task:
### Input:
Pass in an iterator of tweets' JSON and get back an iterator of the
IDs of each tweet.
### Response:
def dehydrate(self, iterator):
"""
Pass in an iterator of tweets' JSON and get back an iterator of the
IDs of each tweet.
"""
for line in iterator:
try:
yield json.loads(line)['id_str']
except Exception as e:
log.error("uhoh: %s\n" % e) |
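A minimal sketch of driving dehydrate with an in-memory iterator; `archive` is a hypothetical stand-in for whatever object carries the method, and `log` is assumed to be configured:
import json

lines = [json.dumps({"id_str": "20"}), "not json at all"]
for tweet_id in archive.dehydrate(iter(lines)):
    print(tweet_id)  # prints "20"; the malformed line is logged and skipped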
def pwa(self, val, wa='WARN'):
""" Print val: WARN in yellow on STDOUT """
self.pstd(self.color.yellow('{}: {}'.format(val, wa))) | Print val: WARN in yellow on STDOUT | Below is the instruction that describes the task:
### Input:
Print val: WARN in yellow on STDOUT
### Response:
def pwa(self, val, wa='WARN'):
""" Print val: WARN in yellow on STDOUT """
self.pstd(self.color.yellow('{}: {}'.format(val, wa))) |
def fastp_read_gc_plot(self):
""" Make the read GC plot for Fastp """
data_labels, pdata = self.filter_pconfig_pdata_subplots(self.fastp_gc_content_data, 'Base Content Percent')
pconfig = {
'id': 'fastp-seq-content-gc-plot',
'title': 'Fastp: Read GC Content',
'xlab': 'Read Position',
'ylab': 'R1 Before filtering: Base Content Percent',
'ymax': 100,
'ymin': 0,
'xDecimals': False,
'yLabelFormat': '{value}%',
'tt_label': '{point.x}: {point.y:.2f}%',
'data_labels': data_labels
}
return linegraph.plot(pdata, pconfig) | Make the read GC plot for Fastp | Below is the instruction that describes the task:
### Input:
Make the read GC plot for Fastp
### Response:
def fastp_read_gc_plot(self):
""" Make the read GC plot for Fastp """
data_labels, pdata = self.filter_pconfig_pdata_subplots(self.fastp_gc_content_data, 'Base Content Percent')
pconfig = {
'id': 'fastp-seq-content-gc-plot',
'title': 'Fastp: Read GC Content',
'xlab': 'Read Position',
'ylab': 'R1 Before filtering: Base Content Percent',
'ymax': 100,
'ymin': 0,
'xDecimals': False,
'yLabelFormat': '{value}%',
'tt_label': '{point.x}: {point.y:.2f}%',
'data_labels': data_labels
}
return linegraph.plot(pdata, pconfig) |
def remove_tier(self, id_tier, clean=True):
"""Remove a tier.
:param str id_tier: Name of the tier.
:param bool clean: Flag to also clean the timeslots.
:raises KeyError: If tier is non-existent.
"""
del(self.tiers[id_tier])
if clean:
self.clean_time_slots() | Remove a tier.
:param str id_tier: Name of the tier.
:param bool clean: Flag to also clean the timeslots.
:raises KeyError: If tier is non-existent. | Below is the instruction that describes the task:
### Input:
Remove a tier.
:param str id_tier: Name of the tier.
:param bool clean: Flag to also clean the timeslots.
:raises KeyError: If tier is non-existent.
### Response:
def remove_tier(self, id_tier, clean=True):
"""Remove a tier.
:param str id_tier: Name of the tier.
:param bool clean: Flag to also clean the timeslots.
:raises KeyError: If tier is non-existent.
"""
del(self.tiers[id_tier])
if clean:
self.clean_time_slots() |
def setStartAction(self, action, *args, **kwargs):
"""
Set a function to call when run() is called, before the main action is called.
Parameters
----------
action: function pointer
The function to call.
*args
Positional arguments to pass to action.
**kwargs:
Keyword arguments to pass to action.
"""
self.init_action = action
self.init_args = args
self.init_kwargs = kwargs | Set a function to call when run() is called, before the main action is called.
Parameters
----------
action: function pointer
The function to call.
*args
Positional arguments to pass to action.
**kwargs:
Keyword arguments to pass to action. | Below is the instruction that describes the task:
### Input:
Set a function to call when run() is called, before the main action is called.
Parameters
----------
action: function pointer
The function to call.
*args
Positional arguments to pass to action.
**kwargs:
Keyword arguments to pass to action.
### Response:
def setStartAction(self, action, *args, **kwargs):
"""
Set a function to call when run() is called, before the main action is called.
Parameters
----------
action: function pointer
The function to call.
*args
Positional arguments to pass to action.
**kwargs:
Keyword arguments to pass to action.
"""
self.init_action = action
self.init_args = args
self.init_kwargs = kwargs |
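Hypothetical usage, assuming a task object whose run() invokes init_action(*init_args, **init_kwargs) before the main action:
def announce(name, loud=False):
    print(name.upper() if loud else name)

task.setStartAction(announce, "starting", loud=True)
# a later task.run() would print "STARTING" before the main action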
def _build_toctree(self):
"""Create a hidden toctree node with the contents of a directory
prefixed by the directory name specified by the `toctree` directive
option.
"""
dirname = posixpath.dirname(self._env.docname)
tree_prefix = self.options['toctree'].strip()
root = posixpath.normpath(posixpath.join(dirname, tree_prefix))
docnames = [docname for docname in self._env.found_docs
if docname.startswith(root)]
# Sort docnames alphabetically based on **class** name.
# The standard we assume is that task doc pages are named after
# their Python namespace.
# NOTE: this ordering only applies to the toctree; the visual ordering
# is set by `process_task_topic_list`.
# NOTE: docnames are **always** POSIX-like paths
class_names = [docname.split('/')[-1].split('.')[-1]
for docname in docnames]
docnames = [docname for docname, _ in
sorted(zip(docnames, class_names),
key=lambda pair: pair[1])]
tocnode = sphinx.addnodes.toctree()
tocnode['includefiles'] = docnames
tocnode['entries'] = [(None, docname) for docname in docnames]
tocnode['maxdepth'] = -1
tocnode['glob'] = None
tocnode['hidden'] = True
return tocnode | Create a hidden toctree node with the contents of a directory
prefixed by the directory name specified by the `toctree` directive
option. | Below is the instruction that describes the task:
### Input:
Create a hidden toctree node with the contents of a directory
prefixed by the directory name specified by the `toctree` directive
option.
### Response:
def _build_toctree(self):
"""Create a hidden toctree node with the contents of a directory
prefixed by the directory name specified by the `toctree` directive
option.
"""
dirname = posixpath.dirname(self._env.docname)
tree_prefix = self.options['toctree'].strip()
root = posixpath.normpath(posixpath.join(dirname, tree_prefix))
docnames = [docname for docname in self._env.found_docs
if docname.startswith(root)]
# Sort docnames alphabetically based on **class** name.
# The standard we assume is that task doc pages are named after
# their Python namespace.
# NOTE: this ordering only applies to the toctree; the visual ordering
# is set by `process_task_topic_list`.
# NOTE: docnames are **always** POSIX-like paths
class_names = [docname.split('/')[-1].split('.')[-1]
for docname in docnames]
docnames = [docname for docname, _ in
sorted(zip(docnames, class_names),
key=lambda pair: pair[1])]
tocnode = sphinx.addnodes.toctree()
tocnode['includefiles'] = docnames
tocnode['entries'] = [(None, docname) for docname in docnames]
tocnode['maxdepth'] = -1
tocnode['glob'] = None
tocnode['hidden'] = True
return tocnode |
def stdformD(D, Cd, M, dimN=2):
"""Reshape dictionary array (`D` in :mod:`.admm.cbpdn` module, `X` in
:mod:`.admm.ccmod` module) to internal standard form.
Parameters
----------
D : array_like
Dictionary array
Cd : int
Size of dictionary channel index
M : int
Number of filters in dictionary
dimN : int, optional (default 2)
Number of problem spatial indices
Returns
-------
Dr : ndarray
Reshaped dictionary array
"""
return D.reshape(D.shape[0:dimN] + (Cd,) + (1,) + (M,)) | Reshape dictionary array (`D` in :mod:`.admm.cbpdn` module, `X` in
:mod:`.admm.ccmod` module) to internal standard form.
Parameters
----------
D : array_like
Dictionary array
Cd : int
Size of dictionary channel index
M : int
Number of filters in dictionary
dimN : int, optional (default 2)
Number of problem spatial indices
Returns
-------
Dr : ndarray
Reshaped dictionary array | Below is the instruction that describes the task:
### Input:
Reshape dictionary array (`D` in :mod:`.admm.cbpdn` module, `X` in
:mod:`.admm.ccmod` module) to internal standard form.
Parameters
----------
D : array_like
Dictionary array
Cd : int
Size of dictionary channel index
M : int
Number of filters in dictionary
dimN : int, optional (default 2)
Number of problem spatial indices
Returns
-------
Dr : ndarray
Reshaped dictionary array
### Response:
def stdformD(D, Cd, M, dimN=2):
"""Reshape dictionary array (`D` in :mod:`.admm.cbpdn` module, `X` in
:mod:`.admm.ccmod` module) to internal standard form.
Parameters
----------
D : array_like
Dictionary array
Cd : int
Size of dictionary channel index
M : int
Number of filters in dictionary
dimN : int, optional (default 2)
Number of problem spatial indices
Returns
-------
Dr : ndarray
Reshaped dictionary array
"""
return D.reshape(D.shape[0:dimN] + (Cd,) + (1,) + (M,)) |
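A quick shape check with random data (numpy assumed): a dictionary with two 8x8 spatial axes, Cd=3 channels, and M=16 filters flattened into one trailing axis comes out with the channel and filter axes separated:
import numpy as np

D = np.random.randn(8, 8, 3 * 16)
Dr = stdformD(D, Cd=3, M=16, dimN=2)
print(Dr.shape)  # (8, 8, 3, 1, 16)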
def get_type_data(name):
"""Return dictionary representation of type.
Can be used to initialize primordium.type.primitives.Type
"""
name = name.upper()
if name in ISO_LANGUAGE_CODES:
name = ISO_LANGUAGE_CODES[name]
if name in ISO_MAJOR_LANGUAGE_TYPES:
namespace = '639-2'
lang_name = ISO_MAJOR_LANGUAGE_TYPES[name]
elif name in ISO_OTHER_LANGUAGE_TYPES:
namespace = '639-3'
lang_name = ISO_OTHER_LANGUAGE_TYPES[name]
else:
raise NotFound('Language Type: ' + name)
return {
'authority': 'ISO',
'namespace': namespace,
'identifier': name,
'domain': 'DisplayText Languages',
'display_name': lang_name + ' Language Type',
'display_label': lang_name,
'description': ('The display text language type for the ' +
lang_name + ' language.')
} | Return dictionary representation of type.
Can be used to initialize primordium.type.primitives.Type | Below is the instruction that describes the task:
### Input:
Return dictionary representation of type.
Can be used to initialize primordium.type.primitives.Type
### Response:
def get_type_data(name):
"""Return dictionary representation of type.
Can be used to initialize primordium.type.primitives.Type
"""
name = name.upper()
if name in ISO_LANGUAGE_CODES:
name = ISO_LANGUAGE_CODES[name]
if name in ISO_MAJOR_LANGUAGE_TYPES:
namespace = '639-2'
lang_name = ISO_MAJOR_LANGUAGE_TYPES[name]
elif name in ISO_OTHER_LANGUAGE_TYPES:
namespace = '639-3'
lang_name = ISO_OTHER_LANGUAGE_TYPES[name]
else:
raise NotFound('Language Type: ' + name)
return {
'authority': 'ISO',
'namespace': namespace,
'identifier': name,
'domain': 'DisplayText Languages',
'display_name': lang_name + ' Language Type',
'display_label': lang_name,
'description': ('The display text language type for the ' +
lang_name + ' language.')
} |
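Assuming 'ENG' resolves through ISO_MAJOR_LANGUAGE_TYPES to 'English', the returned mapping would look roughly like this (a sketch, not verified output):
data = get_type_data('eng')
# {'authority': 'ISO', 'namespace': '639-2', 'identifier': 'ENG',
#  'domain': 'DisplayText Languages',
#  'display_name': 'English Language Type', 'display_label': 'English',
#  'description': 'The display text language type for the English language.'}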
def _extract_transform_colander_schema(self, args):
"""
Extract schema from view args and transform it using
the pipeline of schema transformers
:param args:
Arguments from the view decorator.
:rtype: colander.MappingSchema()
:returns: View schema cloned and transformed
"""
schema = args.get('schema', colander.MappingSchema())
if not isinstance(schema, colander.Schema):
schema = schema()
schema = schema.clone()
for transformer in self.schema_transformers:
schema = transformer(schema, args)
return schema | Extract schema from view args and transform it using
the pipeline of schema transformers
:param args:
Arguments from the view decorator.
:rtype: colander.MappingSchema()
:returns: View schema cloned and transformed | Below is the instruction that describes the task:
### Input:
Extract schema from view args and transform it using
the pipeline of schema transformers
:param args:
Arguments from the view decorator.
:rtype: colander.MappingSchema()
:returns: View schema cloned and transformed
### Response:
def _extract_transform_colander_schema(self, args):
"""
Extract schema from view args and transform it using
the pipeline of schema transformers
:param args:
Arguments from the view decorator.
:rtype: colander.MappingSchema()
:returns: View schema cloned and transformed
"""
schema = args.get('schema', colander.MappingSchema())
if not isinstance(schema, colander.Schema):
schema = schema()
schema = schema.clone()
for transformer in self.schema_transformers:
schema = transformer(schema, args)
return schema |
def _make_minimal(dictionary):
"""
This function removes all the keys whose value is either None or an empty
dictionary.
"""
new_dict = {}
for key, value in dictionary.items():
if value is not None:
if isinstance(value, dict):
new_value = _make_minimal(value)
if new_value:
new_dict[key] = new_value
else:
new_dict[key] = value
return new_dict | This function removes all the keys whose value is either None or an empty
dictionary. | Below is the the instruction that describes the task:
### Input:
This function removes all the keys whose value is either None or an empty
dictionary.
### Response:
def _make_minimal(dictionary):
"""
This function removes all the keys whose value is either None or an empty
dictionary.
"""
new_dict = {}
for key, value in dictionary.items():
if value is not None:
if isinstance(value, dict):
new_value = _make_minimal(value)
if new_value:
new_dict[key] = new_value
else:
new_dict[key] = value
return new_dict |
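A small self-contained check of the pruning behaviour; note that a nested dict that empties out is dropped along the way:
nested = {'a': 1, 'b': None, 'c': {'d': None, 'e': {}}, 'f': {'g': 2}}
print(_make_minimal(nested))  # {'a': 1, 'f': {'g': 2}}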
def pin_assets(self, file_or_dir_path: Path) -> List[Dict[str, str]]:
"""
Return a list of dicts, each containing the IPFS hash, file name, and size of a file.
"""
if file_or_dir_path.is_dir():
asset_data = [dummy_ipfs_pin(path) for path in file_or_dir_path.glob("*")]
elif file_or_dir_path.is_file():
asset_data = [dummy_ipfs_pin(file_or_dir_path)]
else:
raise FileNotFoundError(
f"{file_or_dir_path} is not a valid file or directory path."
)
return asset_data | Return a list of dicts, each containing the IPFS hash, file name, and size of a file. | Below is the instruction that describes the task:
### Input:
Return a list of dicts, each containing the IPFS hash, file name, and size of a file.
### Response:
def pin_assets(self, file_or_dir_path: Path) -> List[Dict[str, str]]:
"""
Return a list of dicts, each containing the IPFS hash, file name, and size of a file.
"""
if file_or_dir_path.is_dir():
asset_data = [dummy_ipfs_pin(path) for path in file_or_dir_path.glob("*")]
elif file_or_dir_path.is_file():
asset_data = [dummy_ipfs_pin(file_or_dir_path)]
else:
raise FileNotFoundError(
f"{file_or_dir_path} is not a valid file or directory path."
)
return asset_data |
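dummy_ipfs_pin is not shown in this snippet; a plausible stand-in that returns the three documented fields (the hash value here is a fabricated placeholder, not a real content hash):
from pathlib import Path

def dummy_ipfs_pin(path: Path) -> dict:
    return {
        "file_name": path.name,
        "ipfs_hash": "Qm...",  # placeholder only
        "size": str(path.stat().st_size),
    }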
def send_sysex(self, sysex_cmd, data):
"""
Sends a SysEx msg.
:arg sysex_cmd: A sysex command byte
:arg data: a bytearray of 7-bit bytes of arbitrary data
"""
msg = bytearray([START_SYSEX, sysex_cmd])
msg.extend(data)
msg.append(END_SYSEX)
self.sp.write(msg) | Sends a SysEx msg.
:arg sysex_cmd: A sysex command byte
:arg data: a bytearray of 7-bit bytes of arbitrary data | Below is the instruction that describes the task:
### Input:
Sends a SysEx msg.
:arg sysex_cmd: A sysex command byte
:arg data: a bytearray of 7-bit bytes of arbitrary data
### Response:
def send_sysex(self, sysex_cmd, data):
"""
Sends a SysEx msg.
:arg sysex_cmd: A sysex command byte
:arg data: a bytearray of 7-bit bytes of arbitrary data
"""
msg = bytearray([START_SYSEX, sysex_cmd])
msg.extend(data)
msg.append(END_SYSEX)
self.sp.write(msg) |
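The framing can be inspected without a serial port. START_SYSEX and END_SYSEX are the standard Firmata markers 0xF0 and 0xF7; 0x71 below is just an arbitrary command byte for illustration:
START_SYSEX, END_SYSEX = 0xF0, 0xF7
msg = bytearray([START_SYSEX, 0x71])
msg.extend(bytearray([0x01, 0x02, 0x7F]))  # 7-bit payload bytes
msg.append(END_SYSEX)
print(msg.hex())  # f07101027ff7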
def _generate_section(self, name, config, cfg_section='default', remove_sasbase=False):
"""Generate the relevant Sphinx nodes.
Generates a section for the Tree datamodel. Formats a tree section
as a list-table directive.
Parameters:
name (str):
The name of the config to be documented, e.g. 'sdsswork'
config (dict):
The tree dictionary of the loaded config environ
cfg_section (str):
The section of the config to load
remove_sasbase (bool):
If True, removes the SAS_BASE_DIR from the beginning of each path
Returns:
A section docutil node
"""
# the source name
source_name = name
# Title
section = nodes.section(
'',
nodes.title(text=cfg_section),
ids=[nodes.make_id(cfg_section)],
names=[nodes.fully_normalize_name(cfg_section)])
# Summarize
result = statemachine.ViewList()
base = config['default']['filesystem'] if remove_sasbase else None
lines = _format_command(cfg_section, config[cfg_section], base=base)
for line in lines:
result.append(line, source_name)
self.state.nested_parse(result, 0, section)
return [section] | Generate the relevant Sphinx nodes.
Generates a section for the Tree datamodel. Formats a tree section
as a list-table directive.
Parameters:
name (str):
The name of the config to be documented, e.g. 'sdsswork'
config (dict):
The tree dictionary of the loaded config environ
cfg_section (str):
The section of the config to load
remove_sasbase (bool):
If True, removes the SAS_BASE_DIR from the beginning of each path
Returns:
A section docutil node | Below is the instruction that describes the task:
### Input:
Generate the relevant Sphinx nodes.
Generates a section for the Tree datamodel. Formats a tree section
as a list-table directive.
Parameters:
name (str):
The name of the config to be documented, e.g. 'sdsswork'
config (dict):
The tree dictionary of the loaded config environ
cfg_section (str):
The section of the config to load
remove_sasbase (bool):
If True, removes the SAS_BASE_DIR from the beginning of each path
Returns:
A section docutil node
### Response:
def _generate_section(self, name, config, cfg_section='default', remove_sasbase=False):
"""Generate the relevant Sphinx nodes.
Generates a section for the Tree datamodel. Formats a tree section
as a list-table directive.
Parameters:
name (str):
The name of the config to be documented, e.g. 'sdsswork'
config (dict):
The tree dictionary of the loaded config environ
cfg_section (str):
The section of the config to load
remove_sasbase (bool):
If True, removes the SAS_BASE_DIR from the beginning of each path
Returns:
A section docutil node
"""
# the source name
source_name = name
# Title
section = nodes.section(
'',
nodes.title(text=cfg_section),
ids=[nodes.make_id(cfg_section)],
names=[nodes.fully_normalize_name(cfg_section)])
# Summarize
result = statemachine.ViewList()
base = config['default']['filesystem'] if remove_sasbase else None
lines = _format_command(cfg_section, config[cfg_section], base=base)
for line in lines:
result.append(line, source_name)
self.state.nested_parse(result, 0, section)
return [section] |
def bare_except(logical_line, noqa):
r"""When catching exceptions, mention specific exceptions whenever possible.
Okay: except Exception:
Okay: except BaseException:
E722: except:
"""
if noqa:
return
regex = re.compile(r"except\s*:")
match = regex.match(logical_line)
if match:
yield match.start(), "E722 do not use bare except'" | r"""When catching exceptions, mention specific exceptions whenever possible.
Okay: except Exception:
Okay: except BaseException:
E722: except: | Below is the instruction that describes the task:
### Input:
r"""When catching exceptions, mention specific exceptions whenever possible.
Okay: except Exception:
Okay: except BaseException:
E722: except:
### Response:
def bare_except(logical_line, noqa):
r"""When catching exceptions, mention specific exceptions whenever possible.
Okay: except Exception:
Okay: except BaseException:
E722: except:
"""
if noqa:
return
regex = re.compile(r"except\s*:")
match = regex.match(logical_line)
if match:
yield match.start(), "E722 do not use bare except'" |
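The check is a simple prefix match on the logical line; a standalone demonstration:
import re

regex = re.compile(r"except\s*:")
print(bool(regex.match("except:")))            # True  -> E722 is reported
print(bool(regex.match("except Exception:")))  # False -> no report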
def remove_child_catalog(self, catalog_id, child_id):
"""Removes a child from a catalog.
arg: catalog_id (osid.id.Id): the ``Id`` of a catalog
arg: child_id (osid.id.Id): the ``Id`` of the new child
raise: NotFound - ``catalog_id`` is not a parent of
``child_id``
raise: NullArgument - ``catalog_id`` or ``child_id`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchyDesignSession.remove_child_bin_template
if self._catalog_session is not None:
return self._catalog_session.remove_child_catalog(catalog_id=catalog_id, child_id=child_id)
return self._hierarchy_session.remove_child(id_=catalog_id, child_id=child_id) | Removes a child from a catalog.
arg: catalog_id (osid.id.Id): the ``Id`` of a catalog
arg: child_id (osid.id.Id): the ``Id`` of the new child
raise: NotFound - ``catalog_id`` is not a parent of
``child_id``
raise: NullArgument - ``catalog_id`` or ``child_id`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.* | Below is the instruction that describes the task:
### Input:
Removes a child from a catalog.
arg: catalog_id (osid.id.Id): the ``Id`` of a catalog
arg: child_id (osid.id.Id): the ``Id`` of the new child
raise: NotFound - ``catalog_id`` is not a parent of
``child_id``
raise: NullArgument - ``catalog_id`` or ``child_id`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
### Response:
def remove_child_catalog(self, catalog_id, child_id):
"""Removes a child from a catalog.
arg: catalog_id (osid.id.Id): the ``Id`` of a catalog
arg: child_id (osid.id.Id): the ``Id`` of the new child
raise: NotFound - ``catalog_id`` is not a parent of
``child_id``
raise: NullArgument - ``catalog_id`` or ``child_id`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.BinHierarchyDesignSession.remove_child_bin_template
if self._catalog_session is not None:
return self._catalog_session.remove_child_catalog(catalog_id=catalog_id, child_id=child_id)
return self._hierarchy_session.remove_child(id_=catalog_id, child_id=child_id) |
def put_policy(Bucket, Policy,
region=None, key=None, keyid=None, profile=None):
'''
Given a valid config, update the policy for a bucket.
Returns {updated: true} if policy was updated and returns
{updated: False} if policy was not updated.
CLI Example:
.. code-block:: bash
salt myminion boto_s3_bucket.put_policy my_bucket {...}
'''
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if Policy is None:
Policy = '{}'
elif not isinstance(Policy, six.string_types):
Policy = salt.utils.json.dumps(Policy)
conn.put_bucket_policy(Bucket=Bucket, Policy=Policy)
return {'updated': True, 'name': Bucket}
except ClientError as e:
return {'updated': False, 'error': __utils__['boto3.get_error'](e)} | Given a valid config, update the policy for a bucket.
Returns {updated: true} if policy was updated and returns
{updated: False} if policy was not updated.
CLI Example:
.. code-block:: bash
salt myminion boto_s3_bucket.put_policy my_bucket {...} | Below is the instruction that describes the task:
### Input:
Given a valid config, update the policy for a bucket.
Returns {updated: true} if policy was updated and returns
{updated: False} if policy was not updated.
CLI Example:
.. code-block:: bash
salt myminion boto_s3_bucket.put_policy my_bucket {...}
### Response:
def put_policy(Bucket, Policy,
region=None, key=None, keyid=None, profile=None):
'''
Given a valid config, update the policy for a bucket.
Returns {updated: true} if policy was updated and returns
{updated: False} if policy was not updated.
CLI Example:
.. code-block:: bash
salt myminion boto_s3_bucket.put_policy my_bucket {...}
'''
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if Policy is None:
Policy = '{}'
elif not isinstance(Policy, six.string_types):
Policy = salt.utils.json.dumps(Policy)
conn.put_bucket_policy(Bucket=Bucket, Policy=Policy)
return {'updated': True, 'name': Bucket}
except ClientError as e:
return {'updated': False, 'error': __utils__['boto3.get_error'](e)} |
def find_view_function(module_name, function_name, fallback_app=None, fallback_template=None, verify_decorator=True):
'''
Finds a view function, class-based view, or template view.
Raises ViewDoesNotExist if not found.
'''
dmp = apps.get_app_config('django_mako_plus')
# I'm calling find_spec first here because I don't want import_module in
# a try/except -- there are lots of reasons that importing can fail, and I just want to
# know whether the file actually exists. find_spec raises AttributeError if not found.
try:
spec = find_spec(module_name)
except ValueError:
spec = None
if spec is None:
# no view module, so create a view function that directly renders the template
try:
return create_view_for_template(fallback_app, fallback_template)
except TemplateDoesNotExist as e:
raise ViewDoesNotExist('view module {} not found, and fallback template {} could not be loaded ({})'.format(module_name, fallback_template, e))
# load the module and function
try:
module = import_module(module_name)
func = getattr(module, function_name)
func.view_type = 'function'
except ImportError as e:
raise ViewDoesNotExist('module "{}" could not be imported: {}'.format(module_name, e))
except AttributeError as e:
raise ViewDoesNotExist('module "{}" found successfully, but "{}" was not found: {}'.format(module_name, function_name, e))
# if class-based view, call as_view() to get a view function to it
if inspect.isclass(func) and issubclass(func, View):
func = func.as_view()
func.view_type = 'class'
# if regular view function, check the decorator
elif verify_decorator and not view_function.is_decorated(func):
raise ViewDoesNotExist("view {}.{} was found successfully, but it must be decorated with @view_function or be a subclass of django.views.generic.View.".format(module_name, function_name))
# attach a converter to the view function
if dmp.options['PARAMETER_CONVERTER'] is not None:
try:
converter = import_qualified(dmp.options['PARAMETER_CONVERTER'])(func)
setattr(func, CONVERTER_ATTRIBUTE_NAME, converter)
except ImportError as e:
raise ImproperlyConfigured('Cannot find PARAMETER_CONVERTER: {}'.format(str(e)))
# return the function/class
return func | Finds a view function, class-based view, or template view.
Raises ViewDoesNotExist if not found. | Below is the instruction that describes the task:
### Input:
Finds a view function, class-based view, or template view.
Raises ViewDoesNotExist if not found.
### Response:
def find_view_function(module_name, function_name, fallback_app=None, fallback_template=None, verify_decorator=True):
'''
Finds a view function, class-based view, or template view.
Raises ViewDoesNotExist if not found.
'''
dmp = apps.get_app_config('django_mako_plus')
# I'm calling find_spec first here because I don't want import_module in
# a try/except -- there are lots of reasons that importing can fail, and I just want to
# know whether the file actually exists. find_spec raises AttributeError if not found.
try:
spec = find_spec(module_name)
except ValueError:
spec = None
if spec is None:
# no view module, so create a view function that directly renders the template
try:
return create_view_for_template(fallback_app, fallback_template)
except TemplateDoesNotExist as e:
raise ViewDoesNotExist('view module {} not found, and fallback template {} could not be loaded ({})'.format(module_name, fallback_template, e))
# load the module and function
try:
module = import_module(module_name)
func = getattr(module, function_name)
func.view_type = 'function'
except ImportError as e:
raise ViewDoesNotExist('module "{}" could not be imported: {}'.format(module_name, e))
except AttributeError as e:
raise ViewDoesNotExist('module "{}" found successfully, but "{}" was not found: {}'.format(module_name, function_name, e))
# if class-based view, call as_view() to get a view function to it
if inspect.isclass(func) and issubclass(func, View):
func = func.as_view()
func.view_type = 'class'
# if regular view function, check the decorator
elif verify_decorator and not view_function.is_decorated(func):
raise ViewDoesNotExist("view {}.{} was found successfully, but it must be decorated with @view_function or be a subclass of django.views.generic.View.".format(module_name, function_name))
# attach a converter to the view function
if dmp.options['PARAMETER_CONVERTER'] is not None:
try:
converter = import_qualified(dmp.options['PARAMETER_CONVERTER'])(func)
setattr(func, CONVERTER_ATTRIBUTE_NAME, converter)
except ImportError as e:
raise ImproperlyConfigured('Cannot find PARAMETER_CONVERTER: {}'.format(str(e)))
# return the function/class
return func |
def get_memory_maps(self):
"""Return process's mapped memory regions as a list of nameduples.
Fields are explained in 'man proc'; here is an updated (Apr 2012)
version: http://goo.gl/fmebo
"""
f = None
try:
f = open("/proc/%s/smaps" % self.pid)
first_line = f.readline()
current_block = [first_line]
def get_blocks():
data = {}
for line in f:
fields = line.split(None, 5)
if len(fields) >= 5:
yield (current_block.pop(), data)
current_block.append(line)
else:
data[fields[0]] = int(fields[1]) * 1024
yield (current_block.pop(), data)
if first_line: # smaps file can be empty
for header, data in get_blocks():
hfields = header.split(None, 5)
try:
addr, perms, offset, dev, inode, path = hfields
except ValueError:
addr, perms, offset, dev, inode, path = hfields + ['']
if not path:
path = '[anon]'
else:
path = path.strip()
yield (addr, perms, path,
data['Rss:'],
data['Size:'],
data.get('Pss:', 0),
data['Shared_Clean:'], data['Shared_Dirty:'],
data['Private_Clean:'], data['Private_Dirty:'],
data['Referenced:'],
data['Anonymous:'],
data['Swap:'])
f.close()
except EnvironmentError:
# XXX - Can't use wrap_exceptions decorator as we're
# returning a generator; this probably needs some
# refactoring in order to avoid this code duplication.
if f is not None:
f.close()
err = sys.exc_info()[1]
if err.errno in (errno.ENOENT, errno.ESRCH):
raise NoSuchProcess(self.pid, self._process_name)
if err.errno in (errno.EPERM, errno.EACCES):
raise AccessDenied(self.pid, self._process_name)
raise
except:
if f is not None:
f.close()
raise | Return process's mapped memory regions as a list of namedtuples.
Fields are explained in 'man proc'; here is an updated (Apr 2012)
version: http://goo.gl/fmebo | Below is the instruction that describes the task:
### Input:
Return process's mapped memory regions as a list of namedtuples.
Fields are explained in 'man proc'; here is an updated (Apr 2012)
version: http://goo.gl/fmebo
### Response:
def get_memory_maps(self):
"""Return process's mapped memory regions as a list of nameduples.
Fields are explained in 'man proc'; here is an updated (Apr 2012)
version: http://goo.gl/fmebo
"""
f = None
try:
f = open("/proc/%s/smaps" % self.pid)
first_line = f.readline()
current_block = [first_line]
def get_blocks():
data = {}
for line in f:
fields = line.split(None, 5)
if len(fields) >= 5:
yield (current_block.pop(), data)
current_block.append(line)
else:
data[fields[0]] = int(fields[1]) * 1024
yield (current_block.pop(), data)
if first_line: # smaps file can be empty
for header, data in get_blocks():
hfields = header.split(None, 5)
try:
addr, perms, offset, dev, inode, path = hfields
except ValueError:
addr, perms, offset, dev, inode, path = hfields + ['']
if not path:
path = '[anon]'
else:
path = path.strip()
yield (addr, perms, path,
data['Rss:'],
data['Size:'],
data.get('Pss:', 0),
data['Shared_Clean:'], data['Shared_Dirty:'],
data['Private_Clean:'], data['Private_Dirty:'],
data['Referenced:'],
data['Anonymous:'],
data['Swap:'])
f.close()
except EnvironmentError:
# XXX - Can't use wrap_exceptions decorator as we're
# returning a generator; this probably needs some
# refactoring in order to avoid this code duplication.
if f is not None:
f.close()
err = sys.exc_info()[1]
if err.errno in (errno.ENOENT, errno.ESRCH):
raise NoSuchProcess(self.pid, self._process_name)
if err.errno in (errno.EPERM, errno.EACCES):
raise AccessDenied(self.pid, self._process_name)
raise
except:
if f is not None:
f.close()
raise |
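The per-field parsing hinges on split(None, 5): header lines such as '00400000-0040b000 r-xp 00000000 08:01 123 /bin/cat' split into six fields, while detail lines split into fewer. A minimal illustration of one detail line:
line = "Rss:                 4 kB\n"
fields = line.split(None, 5)
print(fields[0], int(fields[1]) * 1024)  # Rss: 4096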
def load_public_key(vm_):
'''
Load the public key file if it exists.
'''
public_key_filename = config.get_cloud_config_value(
'ssh_public_key', vm_, __opts__, search_global=False, default=None
)
if public_key_filename is not None:
public_key_filename = os.path.expanduser(public_key_filename)
if not os.path.isfile(public_key_filename):
raise SaltCloudConfigError(
'The defined ssh_public_key \'{0}\' does not exist'.format(
public_key_filename
)
)
with salt.utils.files.fopen(public_key_filename, 'r') as public_key:
key = salt.utils.stringutils.to_unicode(public_key.read().replace('\n', ''))
return key | Load the public key file if it exists. | Below is the instruction that describes the task:
### Input:
Load the public key file if it exists.
### Response:
def load_public_key(vm_):
'''
Load the public key file if it exists.
'''
public_key_filename = config.get_cloud_config_value(
'ssh_public_key', vm_, __opts__, search_global=False, default=None
)
if public_key_filename is not None:
public_key_filename = os.path.expanduser(public_key_filename)
if not os.path.isfile(public_key_filename):
raise SaltCloudConfigError(
'The defined ssh_public_key \'{0}\' does not exist'.format(
public_key_filename
)
)
with salt.utils.files.fopen(public_key_filename, 'r') as public_key:
key = salt.utils.stringutils.to_unicode(public_key.read().replace('\n', ''))
return key |
def infer_reference_name(reference_name_or_path):
"""
Given a string containing a reference name (such as a path to
that reference's FASTA file), return its canonical name
as used by Ensembl.
"""
# identify all cases where reference name or path matches candidate aliases
reference_file_name = os.path.basename(reference_name_or_path)
matches = {'file_name': list(), 'full_path': list()}
for assembly_name in reference_alias_dict.keys():
candidate_list = [assembly_name] + reference_alias_dict[assembly_name]
for candidate in candidate_list:
if candidate.lower() in reference_file_name.lower():
matches['file_name'].append(assembly_name)
elif candidate.lower() in reference_name_or_path.lower():
matches['full_path'].append(assembly_name)
# remove duplicate matches (happens due to overlapping aliases)
matches['file_name'] = list(set(matches['file_name']))
matches['full_path'] = list(set(matches['full_path']))
# given set of existing matches, choose one to return
# (first select based on file_name, then full path. If multiples, use most recent)
if len(matches['file_name']) == 1:
match = matches['file_name'][0]
elif len(matches['file_name']) > 1:
# separate logic for >1 vs 1 to give informative warning
match = _most_recent_assembly(matches['file_name'])
warn(
('More than one reference ({}) matches path in header ({}); '
'the most recent one ({}) was used.').format(
','.join(matches['file_name']), reference_file_name, match))
elif len(matches['full_path']) >= 1:
# combine full-path logic since warning is the same
match = _most_recent_assembly(matches['full_path'])
warn((
'Reference could not be matched against filename ({}); '
'using best match against full path ({}).').format(
reference_name_or_path, match))
else:
raise ValueError(
"Failed to infer genome assembly name for %s" % reference_name_or_path)
return match | Given a string containing a reference name (such as a path to
that reference's FASTA file), return its canonical name
as used by Ensembl. | Below is the instruction that describes the task:
### Input:
Given a string containing a reference name (such as a path to
that reference's FASTA file), return its canonical name
as used by Ensembl.
### Response:
def infer_reference_name(reference_name_or_path):
"""
Given a string containing a reference name (such as a path to
that reference's FASTA file), return its canonical name
as used by Ensembl.
"""
# identify all cases where reference name or path matches candidate aliases
reference_file_name = os.path.basename(reference_name_or_path)
matches = {'file_name': list(), 'full_path': list()}
for assembly_name in reference_alias_dict.keys():
candidate_list = [assembly_name] + reference_alias_dict[assembly_name]
for candidate in candidate_list:
if candidate.lower() in reference_file_name.lower():
matches['file_name'].append(assembly_name)
elif candidate.lower() in reference_name_or_path.lower():
matches['full_path'].append(assembly_name)
# remove duplicate matches (happens due to overlapping aliases)
matches['file_name'] = list(set(matches['file_name']))
matches['full_path'] = list(set(matches['full_path']))
# given set of existing matches, choose one to return
# (first select based on file_name, then full path. If multiples, use most recent)
if len(matches['file_name']) == 1:
match = matches['file_name'][0]
elif len(matches['file_name']) > 1:
# separate logic for >1 vs 1 to give informative warning
match = _most_recent_assembly(matches['file_name'])
warn(
('More than one reference ({}) matches path in header ({}); '
'the most recent one ({}) was used.').format(
','.join(matches['file_name']), reference_file_name, match))
elif len(matches['full_path']) >= 1:
# combine full-path logic since warning is the same
match = _most_recent_assembly(matches['full_path'])
warn((
'Reference could not be matched against filename ({}); '
'using best match against full path ({}).').format(
reference_name_or_path, match))
else:
raise ValueError(
"Failed to infer genome assembly name for %s" % reference_name_or_path)
return match |
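Hypothetical calls, assuming reference_alias_dict carries the usual assembly aliases (e.g. 'hg19' for GRCh37); the exact strings depend on the dictionary contents:
print(infer_reference_name("/data/refs/ucsc.hg19.fasta"))
# would match on the file name and return the GRCh37 assembly name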
def create_country(cls, country, **kwargs):
"""Create Country
Create a new Country
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_country(country, async=True)
>>> result = thread.get()
:param async bool
:param Country country: Attributes of country to create (required)
:return: Country
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._create_country_with_http_info(country, **kwargs)
else:
(data) = cls._create_country_with_http_info(country, **kwargs)
return data | Create Country
Create a new Country
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_country(country, async=True)
>>> result = thread.get()
:param async bool
:param Country country: Attributes of country to create (required)
:return: Country
If the method is called asynchronously,
returns the request thread. | Below is the instruction that describes the task:
### Input:
Create Country
Create a new Country
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_country(country, async=True)
>>> result = thread.get()
:param async bool
:param Country country: Attributes of country to create (required)
:return: Country
If the method is called asynchronously,
returns the request thread.
### Response:
def create_country(cls, country, **kwargs):
"""Create Country
Create a new Country
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_country(country, async=True)
>>> result = thread.get()
:param async bool
:param Country country: Attributes of country to create (required)
:return: Country
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._create_country_with_http_info(country, **kwargs)
else:
(data) = cls._create_country_with_http_info(country, **kwargs)
return data |
def add_function(self, function):
"""
Registers the function to the server's default fixed function manager.
"""
#noinspection PyTypeChecker
if not len(self.settings.FUNCTION_MANAGERS):
raise ConfigurationError(
'Where have the default function manager gone?!')
self.settings.FUNCTION_MANAGERS[0].add_function(function) | Registers the function to the server's default fixed function manager. | Below is the instruction that describes the task:
### Input:
Registers the function to the server's default fixed function manager.
### Response:
def add_function(self, function):
"""
Registers the function to the server's default fixed function manager.
"""
#noinspection PyTypeChecker
if not len(self.settings.FUNCTION_MANAGERS):
raise ConfigurationError(
'Where have the default function manager gone?!')
self.settings.FUNCTION_MANAGERS[0].add_function(function) |
def K2findCampaigns_byname_main(args=None):
"""Exposes K2findCampaigns to the command line."""
parser = argparse.ArgumentParser(
description="Check if a target is "
"(or was) observable by any past or future "
"observing campaign of NASA's K2 mission.")
parser.add_argument('name', nargs=1, type=str,
help="Name of the object. This will be passed on "
"to the CDS name resolver "
"to retrieve coordinate information.")
parser.add_argument('-p', '--plot', action='store_true',
help="Produce a plot showing the target position "
"with respect to all K2 campaigns.")
args = parser.parse_args(args)
targetname = args.name[0]
try:
campaigns, ra, dec = findCampaignsByName(targetname)
except ValueError:
print("Error: could not retrieve coordinates for {0}.".format(targetname))
print("The target may be unknown or there may be a problem "
"connecting to the coordinate server.")
sys.exit(1)
# Print the result
if len(campaigns):
print(Highlight.GREEN +
"Success! {0} is on silicon ".format(targetname) +
"during K2 campaigns {0}.".format(campaigns) +
Highlight.END)
else:
print(Highlight.RED + "Sorry, {} is not on silicon "
"during any K2 campaign.".format(targetname) + Highlight.END)
# Print the pixel positions
for c in campaigns:
printChannelColRow(c, ra, dec)
# Make a context plot if the user requested so
if args.plot:
save_context_plots(ra, dec, targetname=targetname) | Exposes K2findCampaigns to the command line. | Below is the instruction that describes the task:
### Input:
Exposes K2findCampaigns to the command line.
### Response:
def K2findCampaigns_byname_main(args=None):
"""Exposes K2findCampaigns to the command line."""
parser = argparse.ArgumentParser(
description="Check if a target is "
"(or was) observable by any past or future "
"observing campaign of NASA's K2 mission.")
parser.add_argument('name', nargs=1, type=str,
help="Name of the object. This will be passed on "
"to the CDS name resolver "
"to retrieve coordinate information.")
parser.add_argument('-p', '--plot', action='store_true',
help="Produce a plot showing the target position "
"with respect to all K2 campaigns.")
args = parser.parse_args(args)
targetname = args.name[0]
try:
campaigns, ra, dec = findCampaignsByName(targetname)
except ValueError:
print("Error: could not retrieve coordinates for {0}.".format(targetname))
print("The target may be unknown or there may be a problem "
"connecting to the coordinate server.")
sys.exit(1)
# Print the result
if len(campaigns):
print(Highlight.GREEN +
"Success! {0} is on silicon ".format(targetname) +
"during K2 campaigns {0}.".format(campaigns) +
Highlight.END)
else:
print(Highlight.RED + "Sorry, {} is not on silicon "
"during any K2 campaign.".format(targetname) + Highlight.END)
# Print the pixel positions
for c in campaigns:
printChannelColRow(c, ra, dec)
# Make a context plot if the user requested so
if args.plot:
save_context_plots(ra, dec, targetname=targetname) |
def _ParseFValue(self, registry_key):
"""Parses an F value.
Args:
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
Returns:
f_value: F value stored in the Windows Registry key.
Raises:
ParseError: if the Windows Registry key does not contain an F value or
F value cannot be parsed.
"""
registry_value = registry_key.GetValueByName('F')
if not registry_value:
raise errors.ParseError(
'missing value: "F" in Windows Registry key: {0:s}.'.format(
registry_key.name))
f_value_map = self._GetDataTypeMap('f_value')
try:
return self._ReadStructureFromByteStream(
registry_value.data, 0, f_value_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError(exception) | Parses an F value.
Args:
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
Returns:
f_value: F value stored in the Windows Registry key.
Raises:
ParseError: if the Windows Registry key does not contain an F value or
F value cannot be parsed. | Below is the instruction that describes the task:
### Input:
Parses an F value.
Args:
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
Returns:
f_value: F value stored in the Windows Registry key.
Raises:
ParseError: if the Windows Registry key does not contain an F value or
F value cannot be parsed.
### Response:
def _ParseFValue(self, registry_key):
"""Parses an F value.
Args:
registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
Returns:
f_value: F value stored in the Windows Registry key.
Raises:
ParseError: if the Windows Registry key does not contain an F value or
F value cannot be parsed.
"""
registry_value = registry_key.GetValueByName('F')
if not registry_value:
raise errors.ParseError(
'missing value: "F" in Windows Registry key: {0:s}.'.format(
registry_key.name))
f_value_map = self._GetDataTypeMap('f_value')
try:
return self._ReadStructureFromByteStream(
registry_value.data, 0, f_value_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError(exception) |
def base64_user_pass(self):
"""
Composes a basic http auth string, suitable for use with the
_replicator database, and other places that need it.
:returns: Basic http authentication string
"""
if self._username is None or self._password is None:
return None
hash_ = base64.urlsafe_b64encode(bytes_("{username}:{password}".format(
username=self._username,
password=self._password
)))
return "Basic {0}".format(unicode_(hash_)) | Composes a basic http auth string, suitable for use with the
_replicator database, and other places that need it.
:returns: Basic http authentication string | Below is the instruction that describes the task:
### Input:
Composes a basic http auth string, suitable for use with the
_replicator database, and other places that need it.
:returns: Basic http authentication string
### Response:
def base64_user_pass(self):
"""
Composes a basic http auth string, suitable for use with the
_replicator database, and other places that need it.
:returns: Basic http authentication string
"""
if self._username is None or self._password is None:
return None
hash_ = base64.urlsafe_b64encode(bytes_("{username}:{password}".format(
username=self._username,
password=self._password
)))
return "Basic {0}".format(unicode_(hash_)) |
def enumeration_ask(X, e, bn):
"""Return the conditional probability distribution of variable X
given evidence e, from BayesNet bn. [Fig. 14.9]
>>> enumeration_ask('Burglary', dict(JohnCalls=T, MaryCalls=T), burglary
... ).show_approx()
'False: 0.716, True: 0.284'"""
assert X not in e, "Query variable must be distinct from evidence"
Q = ProbDist(X)
for xi in bn.variable_values(X):
Q[xi] = enumerate_all(bn.vars, extend(e, X, xi), bn)
return Q.normalize() | Return the conditional probability distribution of variable X
given evidence e, from BayesNet bn. [Fig. 14.9]
>>> enumeration_ask('Burglary', dict(JohnCalls=T, MaryCalls=T), burglary
... ).show_approx()
'False: 0.716, True: 0.284' | Below is the instruction that describes the task:
### Input:
Return the conditional probability distribution of variable X
given evidence e, from BayesNet bn. [Fig. 14.9]
>>> enumeration_ask('Burglary', dict(JohnCalls=T, MaryCalls=T), burglary
... ).show_approx()
'False: 0.716, True: 0.284'
### Response:
def enumeration_ask(X, e, bn):
"""Return the conditional probability distribution of variable X
given evidence e, from BayesNet bn. [Fig. 14.9]
>>> enumeration_ask('Burglary', dict(JohnCalls=T, MaryCalls=T), burglary
... ).show_approx()
'False: 0.716, True: 0.284'"""
assert X not in e, "Query variable must be distinct from evidence"
Q = ProbDist(X)
for xi in bn.variable_values(X):
Q[xi] = enumerate_all(bn.vars, extend(e, X, xi), bn)
return Q.normalize() |
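extend is the usual AIMA helper that copies the evidence dict and binds one more variable; a minimal stand-in, assuming that reading of the code:
def extend(e, var, val):
    e2 = dict(e)
    e2[var] = val
    return e2

print(extend({'JohnCalls': True}, 'Burglary', False))
# {'JohnCalls': True, 'Burglary': False}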
def Morsi_Alexander(Re):
r'''Calculates drag coefficient of a smooth sphere using the method in
[1]_ as described in [2]_.
.. math::
C_D = \left\{ \begin{array}{ll}
\frac{24}{Re} & \mbox{if $Re < 0.1$}\\
\frac{22.73}{Re}+\frac{0.0903}{Re^2} + 3.69 & \mbox{if $0.1 < Re < 1$}\\
\frac{29.1667}{Re}-\frac{3.8889}{Re^2} + 1.2220 & \mbox{if $1 < Re < 10$}\\
\frac{46.5}{Re}-\frac{116.67}{Re^2} + 0.6167 & \mbox{if $10 < Re < 100$}\\
\frac{98.33}{Re}-\frac{2778}{Re^2} + 0.3644 & \mbox{if $100 < Re < 1000$}\\
\frac{148.62}{Re}-\frac{4.75\times10^4}{Re^2} + 0.3570 & \mbox{if $1000 < Re < 5000$}\\
\frac{-490.5460}{Re}+\frac{57.87\times10^4}{Re^2} + 0.46 & \mbox{if $5000 < Re < 10000$}\\
\frac{-1662.5}{Re}+\frac{5.4167\times10^6}{Re^2} + 0.5191 & \mbox{if $10000 < Re < 50000$}\end{array} \right.
Parameters
----------
Re : float
Reynolds number of the sphere, [-]
Returns
-------
Cd : float
Drag coefficient [-]
Notes
-----
Range is Re <= 2E5.
Original was reviewed, and confirmed to contain the cited equations.
Examples
--------
>>> Morsi_Alexander(200)
0.7866
References
----------
.. [1] Morsi, S. A., and A. J. Alexander. "An Investigation of Particle
Trajectories in Two-Phase Flow Systems." Journal of Fluid Mechanics
55, no. 02 (September 1972): 193-208. doi:10.1017/S0022112072001806.
.. [2] Barati, Reza, Seyed Ali Akbar Salehi Neyshabouri, and Goodarz
Ahmadi. "Development of Empirical Models with High Accuracy for
Estimation of Drag Coefficient of Flow around a Smooth Sphere: An
Evolutionary Approach." Powder Technology 257 (May 2014): 11-19.
doi:10.1016/j.powtec.2014.02.045.
'''
if Re < 0.1:
Cd = 24./Re
elif Re < 1:
Cd = 22.73/Re + 0.0903/Re**2 + 3.69
elif Re < 10:
Cd = 29.1667/Re - 3.8889/Re**2 + 1.222
elif Re < 100:
Cd = 46.5/Re - 116.67/Re**2 + 0.6167
elif Re < 1000:
Cd = 98.33/Re - 2778./Re**2 + 0.3644
elif Re < 5000:
Cd = 148.62/Re - 4.75E4/Re**2 + 0.357
elif Re < 10000:
Cd = -490.546/Re + 57.87E4/Re**2 + 0.46
else:
Cd = -1662.5/Re + 5.4167E6/Re**2 + 0.5191
return Cd | r'''Calculates drag coefficient of a smooth sphere using the method in
[1]_ as described in [2]_.
.. math::
C_D = \left\{ \begin{array}{ll}
\frac{24}{Re} & \mbox{if $Re < 0.1$}\\
\frac{22.73}{Re}+\frac{0.0903}{Re^2} + 3.69 & \mbox{if $0.1 < Re < 1$}\\
\frac{29.1667}{Re}-\frac{3.8889}{Re^2} + 1.2220 & \mbox{if $1 < Re < 10$}\\
\frac{46.5}{Re}-\frac{116.67}{Re^2} + 0.6167 & \mbox{if $10 < Re < 100$}\\
\frac{98.33}{Re}-\frac{2778}{Re^2} + 0.3644 & \mbox{if $100 < Re < 1000$}\\
\frac{148.62}{Re}-\frac{4.75\times10^4}{Re^2} + 0.3570 & \mbox{if $1000 < Re < 5000$}\\
\frac{-490.5460}{Re}+\frac{57.87\times10^4}{Re^2} + 0.46 & \mbox{if $5000 < Re < 10000$}\\
\frac{-1662.5}{Re}+\frac{5.4167\times10^6}{Re^2} + 0.5191 & \mbox{if $10000 < Re < 50000$}\end{array} \right.
Parameters
----------
Re : float
Reynolds number of the sphere, [-]
Returns
-------
Cd : float
Drag coefficient [-]
Notes
-----
Range is Re <= 2E5.
Original was reviewed, and confirmed to contain the cited equations.
Examples
--------
>>> Morsi_Alexander(200)
0.7866
References
----------
.. [1] Morsi, S. A., and A. J. Alexander. "An Investigation of Particle
Trajectories in Two-Phase Flow Systems." Journal of Fluid Mechanics
55, no. 02 (September 1972): 193-208. doi:10.1017/S0022112072001806.
.. [2] Barati, Reza, Seyed Ali Akbar Salehi Neyshabouri, and Goodarz
Ahmadi. "Development of Empirical Models with High Accuracy for
Estimation of Drag Coefficient of Flow around a Smooth Sphere: An
Evolutionary Approach." Powder Technology 257 (May 2014): 11-19.
doi:10.1016/j.powtec.2014.02.045. | Below is the instruction that describes the task:
### Input:
r'''Calculates drag coefficient of a smooth sphere using the method in
[1]_ as described in [2]_.
.. math::
C_D = \left\{ \begin{array}{ll}
\frac{24}{Re} & \mbox{if $Re < 0.1$}\\
\frac{22.73}{Re}+\frac{0.0903}{Re^2} + 3.69 & \mbox{if $0.1 < Re < 1$}\\
\frac{29.1667}{Re}-\frac{3.8889}{Re^2} + 1.2220 & \mbox{if $1 < Re < 10$}\\
\frac{46.5}{Re}-\frac{116.67}{Re^2} + 0.6167 & \mbox{if $10 < Re < 100$}\\
\frac{98.33}{Re}-\frac{2778}{Re^2} + 0.3644 & \mbox{if $100 < Re < 1000$}\\
\frac{148.62}{Re}-\frac{4.75\times10^4}{Re^2} + 0.3570 & \mbox{if $1000 < Re < 5000$}\\
\frac{-490.5460}{Re}+\frac{57.87\times10^4}{Re^2} + 0.46 & \mbox{if $5000 < Re < 10000$}\\
\frac{-1662.5}{Re}+\frac{5.4167\times10^6}{Re^2} + 0.5191 & \mbox{if $10000 < Re < 50000$}\end{array} \right.
Parameters
----------
Re : float
Reynolds number of the sphere, [-]
Returns
-------
Cd : float
Drag coefficient [-]
Notes
-----
Range is Re <= 2E5.
Original was reviewed, and confirmed to contain the cited equations.
Examples
--------
>>> Morsi_Alexander(200)
0.7866
References
----------
.. [1] Morsi, S. A., and A. J. Alexander. "An Investigation of Particle
Trajectories in Two-Phase Flow Systems." Journal of Fluid Mechanics
55, no. 02 (September 1972): 193-208. doi:10.1017/S0022112072001806.
.. [2] Barati, Reza, Seyed Ali Akbar Salehi Neyshabouri, and Goodarz
Ahmadi. "Development of Empirical Models with High Accuracy for
Estimation of Drag Coefficient of Flow around a Smooth Sphere: An
Evolutionary Approach." Powder Technology 257 (May 2014): 11-19.
doi:10.1016/j.powtec.2014.02.045.
### Response:
def Morsi_Alexander(Re):
r'''Calculates drag coefficient of a smooth sphere using the method in
[1]_ as described in [2]_.
.. math::
C_D = \left\{ \begin{array}{ll}
\frac{24}{Re} & \mbox{if $Re < 0.1$}\\
\frac{22.73}{Re}+\frac{0.0903}{Re^2} + 3.69 & \mbox{if $0.1 < Re < 1$}\\
\frac{29.1667}{Re}-\frac{3.8889}{Re^2} + 1.2220 & \mbox{if $1 < Re < 10$}\\
\frac{46.5}{Re}-\frac{116.67}{Re^2} + 0.6167 & \mbox{if $10 < Re < 100$}\\
\frac{98.33}{Re}-\frac{2778}{Re^2} + 0.3644 & \mbox{if $100 < Re < 1000$}\\
\frac{148.62}{Re}-\frac{4.75\times10^4}{Re^2} + 0.3570 & \mbox{if $1000 < Re < 5000$}\\
\frac{-490.5460}{Re}+\frac{57.87\times10^4}{Re^2} + 0.46 & \mbox{if $5000 < Re < 10000$}\\
\frac{-1662.5}{Re}+\frac{5.4167\times10^6}{Re^2} + 0.5191 & \mbox{if $10000 < Re < 50000$}\end{array} \right.
Parameters
----------
Re : float
Reynolds number of the sphere, [-]
Returns
-------
Cd : float
Drag coefficient [-]
Notes
-----
Range is Re <= 2E5.
Original was reviewed, and confirmed to contain the cited equations.
Examples
--------
>>> Morsi_Alexander(200)
0.7866
References
----------
.. [1] Morsi, S. A., and A. J. Alexander. "An Investigation of Particle
Trajectories in Two-Phase Flow Systems." Journal of Fluid Mechanics
55, no. 02 (September 1972): 193-208. doi:10.1017/S0022112072001806.
.. [2] Barati, Reza, Seyed Ali Akbar Salehi Neyshabouri, and Goodarz
Ahmadi. "Development of Empirical Models with High Accuracy for
Estimation of Drag Coefficient of Flow around a Smooth Sphere: An
Evolutionary Approach." Powder Technology 257 (May 2014): 11-19.
doi:10.1016/j.powtec.2014.02.045.
'''
if Re < 0.1:
Cd = 24./Re
elif Re < 1:
Cd = 22.73/Re + 0.0903/Re**2 + 3.69
elif Re < 10:
Cd = 29.1667/Re - 3.8889/Re**2 + 1.222
elif Re < 100:
Cd = 46.5/Re - 116.67/Re**2 + 0.6167
elif Re < 1000:
Cd = 98.33/Re - 2778./Re**2 + 0.3644
elif Re < 5000:
Cd = 148.62/Re - 4.75E4/Re**2 + 0.357
elif Re < 10000:
Cd = -490.546/Re + 57.87E4/Re**2 + 0.46
else:
Cd = -1662.5/Re + 5.4167E6/Re**2 + 0.5191
return Cd |
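A quick sanity check against the docstring example and the Stokes limit at low Reynolds number:
print(round(Morsi_Alexander(200), 4))   # 0.7866, matching the cited example
print(round(Morsi_Alexander(0.01), 1))  # 2400.0, i.e. 24/Re in the Stokes regime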
def diffs_prof(step):
"""Scaled diffusion.
This computation takes sphericity into account if necessary.
Args:
step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
instance.
Returns:
tuple of :class:`numpy.array`: the diffusion and the radial position
at which it is evaluated.
"""
diff, rad = diff_prof(step)
return _scale_prof(step, diff, rad), rad | Scaled diffusion.
This computation takes sphericity into account if necessary.
Args:
step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
instance.
Returns:
tuple of :class:`numpy.array`: the diffusion and the radial position
at which it is evaluated. | Below is the instruction that describes the task:
### Input:
Scaled diffusion.
This computation takes sphericity into account if necessary.
Args:
step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
instance.
Returns:
tuple of :class:`numpy.array`: the diffusion and the radial position
at which it is evaluated.
### Response:
def diffs_prof(step):
"""Scaled diffusion.
This computation takes sphericity into account if necessary.
Args:
step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
instance.
Returns:
tuple of :class:`numpy.array`: the diffusion and the radial position
at which it is evaluated.
"""
diff, rad = diff_prof(step)
return _scale_prof(step, diff, rad), rad |
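The helpers diff_prof and _scale_prof used above belong to stagpy and are
not shown in this entry. A minimal self-contained sketch of the same
pattern (compute a radial profile, apply a dimensional scale, return both
the profile and its radial grid), using hypothetical stand-ins rather than
the real stagpy API:

import numpy as np

def _scale_prof_sketch(prof, rad, scale=1.0):
    # Hypothetical stand-in for _scale_prof: apply one scale factor to
    # the whole profile; the real helper derives it from the run setup.
    return np.asarray(prof) * scale

def diffs_prof_sketch(prof, rad, scale=1.0):
    # Mirrors the structure of diffs_prof above: scale the profile,
    # then return it together with the radial positions.
    return _scale_prof_sketch(prof, rad, scale), np.asarray(rad)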
def delete(self, synchronous=True):
"""Delete the current entity.
Call :meth:`delete_raw` and check for an HTTP 4XX or 5XX response.
Return either the JSON-decoded response or information about a
completed foreman task.
:param synchronous: A boolean. What should happen if the server returns
an HTTP 202 (accepted) status code? Wait for the task to complete
if ``True``. Immediately return a response otherwise.
:returns: A dict. Either the JSON-decoded response or information about
a foreman task.
:raises: ``requests.exceptions.HTTPError`` if the response has an HTTP
4XX or 5XX status code.
:raises: ``ValueError`` If an HTTP 202 response is received and the
response JSON can not be decoded.
:raises nailgun.entity_mixins.TaskTimedOutError: If an HTTP 202
response is received, ``synchronous is True`` and the task times
out.
"""
response = self.delete_raw()
response.raise_for_status()
if (synchronous is True and
response.status_code == http_client.ACCEPTED):
return _poll_task(response.json()['id'], self._server_config)
elif (response.status_code == http_client.NO_CONTENT or
(response.status_code == http_client.OK and
hasattr(response, 'content') and
not response.content.strip())):
# "The server successfully processed the request, but is not
# returning any content. Usually used as a response to a successful
# delete request."
return
return response.json() | Delete the current entity.
Call :meth:`delete_raw` and check for an HTTP 4XX or 5XX response.
Return either the JSON-decoded response or information about a
completed foreman task.
:param synchronous: A boolean. What should happen if the server returns
an HTTP 202 (accepted) status code? Wait for the task to complete
if ``True``. Immediately return a response otherwise.
:returns: A dict. Either the JSON-decoded response or information about
a foreman task.
:raises: ``requests.exceptions.HTTPError`` if the response has an HTTP
4XX or 5XX status code.
:raises: ``ValueError`` If an HTTP 202 response is received and the
response JSON can not be decoded.
:raises nailgun.entity_mixins.TaskTimedOutError: If an HTTP 202
response is received, ``synchronous is True`` and the task times
        out. | Below is the instruction that describes the task:
### Input:
Delete the current entity.
Call :meth:`delete_raw` and check for an HTTP 4XX or 5XX response.
Return either the JSON-decoded response or information about a
completed foreman task.
:param synchronous: A boolean. What should happen if the server returns
an HTTP 202 (accepted) status code? Wait for the task to complete
if ``True``. Immediately return a response otherwise.
:returns: A dict. Either the JSON-decoded response or information about
a foreman task.
:raises: ``requests.exceptions.HTTPError`` if the response has an HTTP
4XX or 5XX status code.
:raises: ``ValueError`` If an HTTP 202 response is received and the
response JSON can not be decoded.
:raises nailgun.entity_mixins.TaskTimedOutError: If an HTTP 202
response is received, ``synchronous is True`` and the task times
out.
### Response:
def delete(self, synchronous=True):
"""Delete the current entity.
Call :meth:`delete_raw` and check for an HTTP 4XX or 5XX response.
Return either the JSON-decoded response or information about a
completed foreman task.
:param synchronous: A boolean. What should happen if the server returns
an HTTP 202 (accepted) status code? Wait for the task to complete
if ``True``. Immediately return a response otherwise.
:returns: A dict. Either the JSON-decoded response or information about
a foreman task.
:raises: ``requests.exceptions.HTTPError`` if the response has an HTTP
4XX or 5XX status code.
:raises: ``ValueError`` If an HTTP 202 response is received and the
response JSON can not be decoded.
:raises nailgun.entity_mixins.TaskTimedOutError: If an HTTP 202
response is received, ``synchronous is True`` and the task times
out.
"""
response = self.delete_raw()
response.raise_for_status()
if (synchronous is True and
response.status_code == http_client.ACCEPTED):
return _poll_task(response.json()['id'], self._server_config)
elif (response.status_code == http_client.NO_CONTENT or
(response.status_code == http_client.OK and
hasattr(response, 'content') and
not response.content.strip())):
# "The server successfully processed the request, but is not
# returning any content. Usually used as a response to a successful
# delete request."
return
return response.json() |
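The core of this entry is the status-code dispatch: HTTP 202 means a task
was queued (poll it when synchronous), 204 or an empty-bodied 200 means a
completed delete with nothing to return, and anything else is decoded as
JSON. A condensed, self-contained sketch of that dispatch, assuming a
requests-style response object and a hypothetical poll_task callable in
place of nailgun's _poll_task:

import http.client as http_client

def handle_delete_response(response, synchronous=True, poll_task=None):
    # Raise on 4XX/5XX first, exactly as delete() does above.
    response.raise_for_status()
    if synchronous and response.status_code == http_client.ACCEPTED:
        # 202: the server queued a task; hand its id to the poller.
        return poll_task(response.json()['id'])
    if (response.status_code == http_client.NO_CONTENT or
            (response.status_code == http_client.OK and
             not response.content.strip())):
        # 204, or 200 with an empty body: nothing to decode.
        return None
    return response.json()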
def get_month_start_date(self):
"""Returns the first day of the current month"""
now = timezone.now()
        return timezone.datetime(day=1, month=now.month, year=now.year, tzinfo=now.tzinfo) | Returns the first day of the current month | Below is the instruction that describes the task:
### Input:
Returns the first day of the current month
### Response:
def get_month_start_date(self):
"""Returns the first day of the current month"""
now = timezone.now()
return timezone.datetime(day=1, month=now.month, year=now.year, tzinfo=now.tzinfo) |
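timezone here is django.utils.timezone, whose datetime attribute behaves
like the standard datetime constructor. The same first-of-month
computation with only the standard library (a sketch, not the Django
version above):

from datetime import datetime, timezone as tz

def month_start(now=None):
    # Land on the first instant of the current month; replace() keeps
    # the tzinfo, and the explicit zeros match the constructor defaults
    # relied on by the Django snippet above.
    now = now or datetime.now(tz.utc)
    return now.replace(day=1, hour=0, minute=0, second=0, microsecond=0)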