import sqlite3


def populateDB(db, packages):
    """Populate the database with security notice information"""
    conn = sqlite3.connect(db)
    cur = conn.cursor()
    print("opened db successfully")  # Python 3 print; the original used the Python 2 statement form
    for vul_id, package in packages.items():
        for info in package:
            cur.execute('''INSERT INTO packages(name, version, release, vulnerabilityId, OS_name, OS_version)
                           VALUES(?,?,?,?,?,?);''',
                        (info['name'], info['version'], info['release'], vul_id, info['os_name'], info['os_version']))
    conn.commit()
    conn.close()

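A minimal usage sketch for populateDB, assuming a pre-created SQLite schema; the table definition and the sample packages mapping below are illustrative assumptions, not part of the original.

# Hedged usage sketch: create the assumed schema, then populate it.
conn = sqlite3.connect('security.db')
conn.execute('''CREATE TABLE IF NOT EXISTS packages(
                    name TEXT, version TEXT, "release" TEXT,
                    vulnerabilityId TEXT, OS_name TEXT, OS_version TEXT)''')
conn.commit()
conn.close()

packages = {
    'USN-0001-1': [{'name': 'openssl', 'version': '1.0.1', 'release': '4ubuntu5',
                    'os_name': 'ubuntu', 'os_version': '14.04'}],
}
populateDB('security.db', packages)
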
def add_target(self, target_name):
    """
    Adds a target to the iSCSI server.
    Args:
        target_name: Name of the target to add to the existing target list.
    Returns:
        None if successful; raises NodeAlreadyInUseException otherwise.
        Query list_targets to get the updated list.
    """
    if target_name not in self.list_targets():
        self.target_list.append(target_name)
    else:
        raise iscsi_exceptions.NodeAlreadyInUseException()

def remove_target(self, target_name):
    """
    Removes a target from the iSCSI server.
    Args:
        target_name: Name of the target that has to be removed.
    Returns:
        None if successful; raises NodeAlreadyUnmappedException otherwise.
    """
    if target_name in self.list_targets():
        self.target_list.remove(target_name)
    else:
        raise iscsi_exceptions.NodeAlreadyUnmappedException()

def list_targets(self):
    """
    Lists all the targets in the current iSCSI server.
    Returns:
        List of all targets.
    """
    return self.target_list

def show_status(self):
    """
    Shows the status of the iSCSI server.
    Returns:
        String which is either "Running" or "Dead"
    """
    return self.server_status

def runTest(self):
    """ Test clone image: clone a parent rbd snapshot into a COW child """
    self.fs.clone(CEPH_IMG, CEPH_SNAP_IMG, CEPH_CHILD_IMG)
    children = self.fs.list_children(CEPH_IMG, CEPH_SNAP_IMG)
    children_imgs = [child[1] for child in children if child[0] == POOL]
    self.assertIn(CEPH_CHILD_IMG, children_imgs,
                  "Error, cannot find child image")
    parent_info = self.fs.get_parent_info(CEPH_CHILD_IMG)
    self.assertEqual(CEPH_IMG, parent_info[1])
    self.assertEqual(CEPH_SNAP_IMG, parent_info[2])

def import_ceph_image(self, img):
    """
    Import an image from ceph to be used by BMI, by cloning it.
    :param img: Name of image in ceph
    :return: True on successful completion
    """
    try:
        ceph_img_name = str(img)
        # create a snapshot of the golden image and protect it.
        # this is needed because, in ceph, you can only create clones from
        # snapshots.
        self.fs.snap_image(ceph_img_name, constants.DEFAULT_SNAPSHOT_NAME)
        self.fs.snap_protect(ceph_img_name,
                             constants.DEFAULT_SNAPSHOT_NAME)
        # insert golden image name into bmi db
        self.db.image.insert(ceph_img_name, self.pid)
        # get a name for our copy of the golden image. For instance, an
        # image in ceph called centos6.7 will, after cloning, be given
        # a name like 4img1 based on the UID in config and image id in db
        snap_ceph_name = self.__get_ceph_image_name(ceph_img_name)
        # clone the snapshot of the golden image and then flatten it
        self.fs.clone(ceph_img_name, constants.DEFAULT_SNAPSHOT_NAME,
                      snap_ceph_name)
        self.fs.flatten(snap_ceph_name)
        # create a snapshot of our newly created golden image so that when
        # we provision, we can easily make clones from this readily
        # available snapshot.
        self.fs.snap_image(snap_ceph_name, constants.DEFAULT_SNAPSHOT_NAME)
        self.fs.snap_protect(snap_ceph_name,
                             constants.DEFAULT_SNAPSHOT_NAME)
        # unprotect and delete the snapshot of the original golden image
        # because we no longer need it.
        self.fs.snap_unprotect(ceph_img_name,
                               constants.DEFAULT_SNAPSHOT_NAME)
        self.fs.remove_snapshot(ceph_img_name,
                                constants.DEFAULT_SNAPSHOT_NAME)
        return self.__return_success(True)
    except (DBException, FileSystemException) as e:
        logger.exception('')
        return self.__return_error(e)

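A hedged sketch of the underlying ceph/rbd calls the self.fs wrapper presumably performs, using the upstream rbd Python bindings; the pool, image, and clone names are illustrative assumptions.

# Hedged sketch of snapshot -> protect -> clone -> flatten with the rbd
# bindings; 'rbd', 'centos6.7', 'golden', and '4img1' are assumed names.
import rados
import rbd

cluster = rados.Rados(conffile='/etc/ceph/ceph.conf')
cluster.connect()
ioctx = cluster.open_ioctx('rbd')  # assumed pool name
with rbd.Image(ioctx, 'centos6.7') as img:
    img.create_snap('golden')   # corresponds to snap_image
    img.protect_snap('golden')  # corresponds to snap_protect
rbd.RBD().clone(ioctx, 'centos6.7', 'golden', ioctx, '4img1')
with rbd.Image(ioctx, '4img1') as clone:
    clone.flatten()             # detach the clone from its parent
ioctx.close()
cluster.shutdown()
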
def import_ceph_snapshot(self, img, snap_name, protect):
    """
    Import a snapshot from ceph to be used by BMI. Similar to
    import_ceph_image, except we can directly start the cloning process
    because a snapshot already exists.
    :param img: Name of the image in ceph whose snapshot will be cloned
    :param snap_name: Name of the snapshot of the image
    :param protect: Whether the snapshot should be protected before cloning
    :return: True on successful completion
    """
    try:
        ceph_img_name = str(img)
        if protect:
            self.fs.snap_protect(ceph_img_name, snap_name)
        self.db.image.insert(ceph_img_name, self.pid)
        snap_ceph_name = self.__get_ceph_image_name(ceph_img_name)
        self.fs.clone(ceph_img_name, snap_name,
                      snap_ceph_name)
        self.fs.flatten(snap_ceph_name)
        self.fs.snap_image(snap_ceph_name, constants.DEFAULT_SNAPSHOT_NAME)
        self.fs.snap_protect(snap_ceph_name,
                             constants.DEFAULT_SNAPSHOT_NAME)
        return self.__return_success(True)
    except (DBException, FileSystemException) as e:
        logger.exception('')
        return self.__return_error(e)

def copy_image(self, img1, dest_project, img2=None):
    """
    Create a deep copy of the source image
    :param img1: Name of the source image
    :param dest_project: Name of the project where the destination image
        will be created
    :param img2: Name of the destination image
    :return: True on successful completion
    """
    try:
        if not self.is_admin and (self.proj != dest_project):
            raise AuthorizationFailedException()
        dest_pid = self.__does_project_exist(dest_project)
        self.db.image.copy_image(self.proj, img1, dest_pid, img2)
        if img2 is not None:
            ceph_name = self.get_ceph_image_name_from_project(img2,
                                                              dest_project)
        else:
            ceph_name = self.get_ceph_image_name_from_project(img1,
                                                              dest_project)
        self.fs.clone(self.get_ceph_image_name_from_project(
                          img1, self.proj),
                      constants.DEFAULT_SNAPSHOT_NAME,
                      ceph_name)
        self.fs.flatten(ceph_name)
        self.fs.snap_image(ceph_name, constants.DEFAULT_SNAPSHOT_NAME)
        self.fs.snap_protect(ceph_name, constants.DEFAULT_SNAPSHOT_NAME)
        return self.__return_success(True)
    except (DBException, FileSystemException) as e:
        logger.exception('')
        return self.__return_error(e)

def load(force=False):
    """
    Loads the config from the path in the BMI_CONFIG environment variable,
    falling back to the default location if the variable is not set
    :param force: Forces a reload of the config
    :return: None
    """
    global __config
    if __config is None or force:
        try:
            path = os.environ[
                constants.CONFIG_LOCATION_ENV_VARIABLE]
        except KeyError:
            path = constants.CONFIG_DEFAULT_LOCATION
        __config = BMIConfig(path)
        __config.load_config()
        parse_config(__config)

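A short usage sketch for load(); the environment variable name matches the docstring, while the config path is an illustrative assumption.

# Hedged usage sketch; '/etc/bmi/bmi.cfg' is an assumed path.
import os

os.environ['BMI_CONFIG'] = '/etc/bmi/bmi.cfg'  # optional override
load()             # parses the config on first call
load(force=True)   # re-reads the file even if already loaded
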
def load_config(self):
    """
    Loads the config from the given path
    :return: None
    """
    if not self.config.read(self.configfile):
        raise IOError('cannot load ' + self.configfile)

def option(self, section, option, type=str, required=True):
    """
    Parses the given option from config, converts it to another type if
    required, or raises an exception if it is missing, and adds it as an
    attribute to the config as cfg.section.option
    :param section: Section under which the option should be present
    :param option: The option to be parsed
    :param type: the conversion function for the required type, like int, etc.
    :param required: Whether an exception should be raised if missing
    :return: None
    """
    try:
        value = self.config.get(section, option)
        section_obj = getattr(self, section, None)
        if section_obj is None:
            section_obj = ConfigSection()
            setattr(self, section, section_obj)
        if type is bool:
            v = value.lower()
            if v in ['true', 'false']:
                setattr(section_obj, option, v == 'true')
            else:
                raise ValueError
        else:
            setattr(section_obj, option, type(value))
    except ConfigParser.Error:
        if required:
            raise config_exceptions.MissingOptionInConfigException(option,
                                                                   section)
    except ValueError:
        raise config_exceptions.InvalidValueConfigException(option,
                                                            section)

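A short usage sketch for BMIConfig.option (and section, shown next); the file name and the section/option names are illustrative assumptions.

# Hedged usage sketch; 'bmi.cfg', 'db', and 'fs' are assumed names.
cfg = BMIConfig('bmi.cfg')
cfg.load_config()
cfg.option('db', 'url')                  # string option -> cfg.db.url
cfg.option('db', 'pool_size', type=int)  # converted -> cfg.db.pool_size
cfg.option('db', 'debug', type=bool, required=False)  # optional
cfg.section('fs')                        # whole section -> cfg.fs.<name>
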
def section(self, section_name, required=True):
    """
    Parses an entire section and adds its options to the config object,
    just like option
    :param section_name: the section to parse
    :param required: Whether an exception should be raised if missing
    :return: None
    """
    try:
        section = self.config.items(section_name)
        section_obj = getattr(self, section_name, None)
        if section_obj is None:
            section_obj = ConfigSection()
            setattr(self, section_name, section_obj)
        for name, value in section:
            setattr(section_obj, name, value)
    except ConfigParser.Error:
        if required:
            raise config_exceptions.MissingSectionInConfigException(
                section_name)

def call(command, sudo=False):
    """
    Executes the given command as a subprocess (no shell is involved)
    :param command: the command to execute as a string
    :param sudo: whether to execute as root (default is False)
    :return: output of the command as a string
    """
    s_command = command.split()
    if sudo:
        s_command.insert(0, 'sudo')
    try:
        output = subprocess.check_output(s_command, stderr=subprocess.STDOUT)
        return output
    except subprocess.CalledProcessError as e:
        raise shell_exceptions.CommandFailedException(str(e))

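A short usage sketch for call(); the commands themselves are illustrative assumptions.

# Hedged usage sketch; 'uptime' and 'dmesg' are assumed commands.
uptime = call('uptime')
kernel_log = call('dmesg', sudo=True)  # runs: sudo dmesg
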
def call_service_command(command, service_name, final_status=None):
    """
    Calls the given service command and optionally checks the service status
    afterwards
    :param command: service command to call
    :param service_name: The service name
    :param final_status: The expected status of the daemon after the call
    :return: Output of the call
    """
    full_command = "service %s %s" % (service_name, command)
    output = call(full_command, sudo=True)
    if final_status is not None:
        status = get_service_status(service_name)
        # compare by value; the original 'is not' tested object identity,
        # which can spuriously fail for equal but distinct strings
        if status != final_status:
            raise shell_exceptions.ServiceCommandFailedException(status)
    return output

def list_children(self, img_id, parent_snap):
    """
    Returns the children of the given image snapshot.
    Used only by tests.
    :param img_id: what the image is called
    :param parent_snap: the snapshot to read from
    :return: a list of (pool name, image name) tuples
    """
    try:
        with self.__open_image(img_id) as img:
            img.set_snap(parent_snap)
            return img.list_children()
    except rbd.ImageNotFound:
        raise file_system_exceptions.ImageNotFoundException(img_id)

def is_snap_protected(self, img_id, snap_name):
    """
    Find out whether a snapshot is protected from deletion.
    Required only for tests.
    :param img_id: what the image is called
    :param snap_name: the snapshot to check
    :return: bool: whether the snapshot is protected
    """
    try:
        with self.__open_image(img_id) as img:
            return img.is_protected_snap(snap_name)
    except rbd.ImageNotFound:
        raise file_system_exceptions.ImageNotFoundException(img_id)

import time  # used for the fixed wait below; 'scraper' is provided elsewhere


def open_page(url):
    """Load page, and try to accept cookies and close pop-ups
    """
    scraper.set_page(url)
    time.sleep(5)
    scraper.refresh_selenium()
    # Detect login pop-up
    selector_popup_close = "#popupContainer > div:nth-child(1) > img"
    popup_found = scraper.get_attrib(selector_popup_close, "src")
    if popup_found:
        scraper.click(selector_popup_close)
        scraper.refresh_selenium()
    # Detect discounts pop-up
    selector_popup_close = "#st_news_letter_popup_3 > div > div > button"
    popup_found = scraper.get_attrib(selector_popup_close, "aria-label")
    if popup_found:
        scraper.click(selector_popup_close)
        scraper.refresh_selenium()
    # Accept cookies
    selector_accept = "#lgcookieslaw_accept"
    popup_found = scraper.get_text(selector_accept)
    if popup_found:
        scraper.click(selector_accept)
        scraper.refresh_selenium()

def read_arrow_batches_from_odbc(
    query: str,
    batch_size: int,
    connection_string: str,
    user: Optional[str] = None,
    password: Optional[str] = None,
    parameters: Optional[List[Optional[str]]] = None,
    max_text_size: Optional[int] = None,
    max_binary_size: Optional[int] = None,
    falliable_allocations: bool = True,
) -> Optional[BatchReader]:
    """
    Execute the query and read the result as an iterator over Arrow batches.
    :param query: The SQL statement yielding the result set which is converted into arrow record
        batches.
    :param batch_size: The maximum number of rows within each batch.
    :param connection_string: ODBC Connection string used to connect to the data source. To find a
        connection string for your data source try https://www.connectionstrings.com/.
    :param user: Allows for specifying the user separately from the connection string if it is not
        already part of it. The value will eventually be escaped and attached to the connection
        string as `UID`.
    :param password: Allows for specifying the password separately from the connection string if it
        is not already part of it. The value will eventually be escaped and attached to the
        connection string as `PWD`.
    :param parameters: ODBC allows you to use a question mark as placeholder marker (``?``) for
        positional parameters. This argument takes a list of parameters whose number must match the
        number of placeholders in the SQL statement. Using this instead of literals helps you avoid
        SQL injections or may otherwise simplify your code. Currently all parameters are passed as
        VARCHAR strings. You can use `None` to pass `NULL`.
    :param max_text_size: An upper limit for the size of buffers bound to variadic text columns of
        the data source. This limit does not (directly) apply to the size of the created arrow
        buffers, but rather applies to the buffers used for the data in transit. Use this option if
        you have e.g. VARCHAR(MAX) fields in your database schema. In such a case without an upper
        limit, the ODBC driver of your data source is asked for the maximum size of an element, and
        is likely to answer with either 0 or a value which is way larger than any actual entry in
        the column. If you cannot adapt your database schema, this limit might be what you are
        looking for. On Windows systems the size is in double words (16 bit), as Windows utilizes a
        UTF-16 encoding, so this translates to roughly the size in letters. On non-Windows systems
        this is the size in bytes and the data source is assumed to utilize a UTF-8 encoding.
        ``None`` means no upper limit is set and the maximum element size reported by ODBC is used
        to determine buffer sizes.
    :param max_binary_size: An upper limit for the size of buffers bound to variadic binary columns
        of the data source. This limit does not (directly) apply to the size of the created arrow
        buffers, but rather applies to the buffers used for the data in transit. Use this option if
        you have e.g. VARBINARY(MAX) fields in your database schema. In such a case without an
        upper limit, the ODBC driver of your data source is asked for the maximum size of an
        element, and is likely to answer with either 0 or a value which is way larger than any
        actual entry in the column. If you cannot adapt your database schema, this limit might be
        what you are looking for. This is the maximum size in bytes of the binary column.
    :param falliable_allocations: If ``True`` a recoverable error is raised in case there is not
        enough memory to allocate the buffers. This option may incur a performance penalty which
        scales with the batch size parameter (but not with the amount of actual data in the
        source). In case you can test your query against the schema you can safely set this to
        ``False``. The required memory will not depend on the amount of data in the data source.
        Default is ``True`` though, safety first.
    :return: In case the query does not produce a result set (e.g. in case of an INSERT statement),
        ``None`` is returned. Should the statement return a result set, a ``BatchReader`` is
        returned, which implements the iterator protocol and iterates over individual arrow
        batches.
    """
    query_bytes = query.encode("utf-8")
    connection = connect_to_database(connection_string, user, password)
    # Connecting to the database has been successful. Note that connection does not truly take
    # ownership of the connection. If it runs out of scope (e.g. due to a raised exception) the
    # connection would not be closed and its associated resources would not be freed.
    # However, this is fine since everything from here on out until we call arrow_odbc_reader_make
    # is infallible. arrow_odbc_reader_make will truly take ownership of the connection. Even if it
    # should fail, it will be closed correctly.
    if parameters is None:
        parameters_array = FFI.NULL
        parameters_len = 0
        encoded_parameters = []
    else:
        parameters_array = ffi.new("ArrowOdbcParameter *[]", len(parameters))
        parameters_len = len(parameters)
        # Must be kept alive. Within Rust code we only allocate an additional
        # indicator; the string payload is just referenced.
        encoded_parameters = [to_bytes_and_len(p) for p in parameters]
    if max_text_size is None:
        max_text_size = 0
    if max_binary_size is None:
        max_binary_size = 0
    for p_index in range(0, parameters_len):
        (p_bytes, p_len) = encoded_parameters[p_index]
        parameters_array[p_index] = lib.arrow_odbc_parameter_string_make(p_bytes, p_len)
    reader_out = ffi.new("ArrowOdbcReader **")
    error = lib.arrow_odbc_reader_make(
        connection,
        query_bytes,
        len(query_bytes),
        batch_size,
        parameters_array,
        parameters_len,
        max_text_size,
        max_binary_size,
        falliable_allocations,
        reader_out,
    )
    # See if we managed to execute the query successfully and return an
    # error if not
    raise_on_error(error)
    reader = reader_out[0]
    if reader == ffi.NULL:
        # The query ran successfully but did not produce a result set
        return None
    else:
        return BatchReader(reader)

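A short usage sketch; the connection string, query, table, and parameter value are illustrative assumptions.

# Hedged usage sketch; driver, server, and the users table are assumed.
reader = read_arrow_batches_from_odbc(
    query="SELECT id, name FROM users WHERE country = ?",
    batch_size=1000,
    connection_string="Driver={ODBC Driver 17 for SQL Server};Server=localhost;",
    parameters=["DE"],
)
if reader is not None:
    for batch in reader:  # each batch is an Arrow record batch
        print(batch.num_rows)
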
def write_batch(self, batch):
    """
    Fills the internal buffers of the writer with data from the batch. Every
    time they are full, the data is sent to the database. To make sure all
    the data is sent, ``flush`` must be called.
    """
    with arrow_ffi.new("struct ArrowArray*") as c_array, \
            arrow_ffi.new("struct ArrowSchema*") as c_schema:
        # Get the references to the C Data structures
        c_array_ptr = int(arrow_ffi.cast("uintptr_t", c_array))
        c_schema_ptr = int(arrow_ffi.cast("uintptr_t", c_schema))
        # Export the Array to the C Data structures.
        batch._export_to_c(c_array_ptr)
        batch.schema._export_to_c(c_schema_ptr)
        lib.arrow_odbc_writer_write_batch(self.handle, c_array, c_schema)

def flush(self):
    """
    Inserts the remaining rows of the last chunk to the database.
    """
    lib.arrow_odbc_writer_flush(self.handle)

def insert_into_table(
    reader: Any,
    chunk_size: int,
    table: str,
    connection_string: str,
    user: Optional[str] = None,
    password: Optional[str] = None,
):
    """
    Consume the batches in the reader and insert them into a table on the database.
    :param reader: Reader is used to iterate over record batches. It must expose a `schema`
        attribute, referencing an Arrow schema. Each field in the schema must correspond to a
        column in the table with identical name.
    :param chunk_size: Number of records to insert in each roundtrip to the database. Independent
        of batch size (i.e. number of rows in an individual record batch).
    :param table: Name of a database table to insert into. Used to generate the insert statement
        for the bulk writer.
    :param connection_string: ODBC Connection string used to connect to the data source. To find a
        connection string for your data source try https://www.connectionstrings.com/.
    :param user: Allows for specifying the user separately from the connection string if it is not
        already part of it. The value will eventually be escaped and attached to the connection
        string as `UID`.
    :param password: Allows for specifying the password separately from the connection string if it
        is not already part of it. The value will eventually be escaped and attached to the
        connection string as `PWD`.
    """
    table_bytes = table.encode("utf-8")
    # Allocate structures where we will export the Array data and the Array schema. They will be
    # released when we exit the with block.
    with arrow_ffi.new("struct ArrowSchema*") as c_schema:
        # Get the references to the C Data structures.
        c_schema_ptr = int(arrow_ffi.cast("uintptr_t", c_schema))
        # Export the schema to the C Data structures.
        reader.schema._export_to_c(c_schema_ptr)
        connection = connect_to_database(connection_string, user, password)
        # Connecting to the database has been successful. Note that connection does not truly take
        # ownership of the connection. If it runs out of scope (e.g. due to a raised exception) the
        # connection would not be closed and its associated resources would not be freed. However
        # `arrow_odbc_writer_make` will take ownership of connection. Even if it should fail the
        # connection will be closed.
        writer_out = ffi.new("ArrowOdbcWriter **")
        error = lib.arrow_odbc_writer_make(
            connection, table_bytes, len(table_bytes), chunk_size, c_schema, writer_out
        )
        # Surface any error from constructing the writer instead of silently
        # discarding the returned error pointer
        raise_on_error(error)
        writer = BatchWriter(writer_out[0])
        # Write all batches in reader
        for batch in reader:
            writer.write_batch(batch)
        writer.flush()

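A short usage sketch; the schema, sample data, table name, and connection string are illustrative assumptions.

# Hedged usage sketch using pyarrow to build a reader; names are assumed.
import pyarrow as pa

schema = pa.schema([("id", pa.int64()), ("name", pa.string())])
batches = [pa.record_batch([[1, 2], ["a", "b"]], schema=schema)]
reader = pa.RecordBatchReader.from_batches(schema, batches)
insert_into_table(
    reader=reader,
    chunk_size=1000,
    table="users",
    connection_string="Driver={ODBC Driver 17 for SQL Server};Server=localhost;",
)
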
def register_device_with_private_key(device_id, realm, private_key_file,
                                     pairing_base_url) -> str:
    """
    Registers a Device against an Astarte instance/realm with a Private Key
    Returns the Credentials secret for the Device
    Parameters
    ----------
    device_id : str
        The Device ID to register.
    realm : str
        The Realm in which to register the Device.
    private_key_file : str
        Path to the Private Key file for the Realm. It will be used to Authenticate against Pairing API.
    pairing_base_url : str
        The Base URL of Pairing API of the Astarte Instance the Device will be registered in.
    """
    return __register_device(
        device_id, realm,
        __register_device_headers_with_private_key(private_key_file),
        pairing_base_url)

def register_device_with_jwt_token(device_id, realm, jwt_token,
                                   pairing_base_url) -> str:
    """
    Registers a Device against an Astarte instance/realm with a JWT Token
    Returns the Credentials secret for the Device
    Parameters
    ----------
    device_id : str
        The Device ID to register.
    realm : str
        The Realm in which to register the Device.
    jwt_token : str
        A JWT Token to Authenticate against Pairing API. The token must have access to Pairing API and to the agent API paths.
    pairing_base_url : str
        The Base URL of Pairing API of the Astarte Instance the Device will be registered in.
    """
    return __register_device(
        device_id, realm, __register_device_headers_with_jwt_token(jwt_token),
        pairing_base_url)

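A short usage sketch for device registration; the device ID, realm, token, and URL are illustrative assumptions.

# Hedged usage sketch; all argument values below are assumed placeholders.
credentials_secret = register_device_with_jwt_token(
    device_id="2TBn-jNESuuHamE2Zo1anA",
    realm="test",
    jwt_token="<a JWT with access to the Pairing agent API>",
    pairing_base_url="https://api.astarte.example.com/pairing",
)
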
def add_interface(self, interface_definition):
    """
    Adds an Interface to the Device
    This will add an Interface definition to the Device. It has to be called before :py:func:`connect`, as it will be
    used for building the Device Introspection.
    Parameters
    ----------
    interface_definition : dict
        An Astarte Interface definition in the form of a Python dictionary. Usually obtained by using json.loads on an Interface file.
    """
    self.__interfaces[
        interface_definition["interface_name"]] = interface_definition

def remove_interface(self, interface_name):
    """
    Removes an Interface from the Device
    Removes an Interface definition from the Device. It has to be called before :py:func:`connect`, as it will be
    used for building the Device Introspection.
    Parameters
    ----------
    interface_name : str
        The name of an Interface previously added with :py:func:`add_interface`.
    """
    if interface_name in self.__interfaces:
        del self.__interfaces[interface_name]

def connect(self):
    """
    Connects the Device asynchronously.
    When calling connect, a new connection thread is spawned and the Device will start a connection routine.
    The function might return before the Device connects: you want to use the on_connected callback to ensure
    you are notified upon connection.
    In case the Device gets disconnected unexpectedly, it will try to reconnect indefinitely until disconnect()
    is called.
    """
    if self.__is_connected:
        return
    if not self.__is_crypto_setup:
        self.__setup_crypto()
    transport_info = pairing_handler.obtain_device_transport_information(
        self.__device_id, self.__realm, self.__credentials_secret,
        self.__pairing_base_url)
    broker_url = ""
    # We support only MQTT v1
    for transport, transport_data in transport_info["protocols"].items():
        if transport != "astarte_mqtt_v1":
            continue
        # Get the Broker URL
        broker_url = transport_data["broker_url"]
    # Grab the URL components we care about
    parsed_url = urlparse(broker_url)
    self.__mqtt_client.connect_async(parsed_url.hostname, parsed_url.port)
    self.__mqtt_client.loop_start()

def disconnect(self):
    """
    Disconnects the Device asynchronously.
    When calling disconnect, the connection thread is requested to terminate the connection, and the thread is stopped
    when the disconnection happens.
    The function might return before the Device disconnects: you want to use the on_disconnected callback to ensure
    you are notified upon disconnection. When doing so, check the return code parameter: if it is 0, it means the disconnection
    happened following an explicit disconnection request.
    """
    if not self.__is_connected:
        return
    self.__mqtt_client.disconnect()

def is_connected(self) -> bool:
    """
    Returns whether the Device is currently connected.
    """
    return self.__is_connected

def send(self, interface_name, interface_path, payload, timestamp=None):
    """
    Sends an individual message to an interface.
    Parameters
    ----------
    interface_name : str
        The name of the Interface to send data to.
    interface_path : str
        The path on the Interface to send data to.
    payload : object
        The value to be sent. The type should be compatible with the one specified in the interface path.
    timestamp : datetime, optional
        If sending a Datastream with explicit_timestamp, you can specify a datetime object which will be registered
        as the timestamp for the value.
    """
    if self.__is_interface_aggregate(interface_name):
        raise Exception(
            f'Interface {interface_name} is an aggregate interface. You should use send_aggregate.'
        )
    # TODO: validate paths
    object_payload = {'v': payload}
    if timestamp:
        object_payload['t'] = timestamp
    self.__send_generic(
        f'{self.__get_base_topic()}/{interface_name}{interface_path}',
        object_payload)

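A short usage sketch for send(); `device` is an assumed, already-connected Device instance, and the interface name, path, and value are illustrative assumptions.

# Hedged usage sketch; interface name, path, and value are assumed.
from datetime import datetime, timezone

device.send(
    "org.example.Sensors",
    "/livingroom/temperature",
    21.5,
    timestamp=datetime.now(timezone.utc),
)
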
def send_aggregate(self,
                   interface_name,
                   payload,
                   timestamp=None):
    """
    Sends an aggregate message to an interface
    Parameters
    ----------
    interface_name : str
        The name of the Interface to send data to.
    payload : dict
        A dictionary containing the path:value map for the aggregate.
    timestamp : datetime, optional
        If the Datastream has explicit_timestamp, you can specify a datetime object which will be registered
        as the timestamp for the value.
    """
    if not self.__is_interface_aggregate(interface_name):
        raise Exception(
            f'Interface {interface_name} is not an aggregate interface. You should use send.'
        )
    # TODO: validate paths
    object_payload = {'v': payload}
    if timestamp:
        object_payload['t'] = timestamp
    self.__send_generic(f'{self.__get_base_topic()}/{interface_name}',
                        object_payload)

def span_context_to_string(trace_id, span_id, parent_id, flags):
    """
    Serialize span ID to a string
    {trace_id}:{span_id}:{parent_id}:{flags}
    Numbers are encoded as variable-length lower-case hex strings.
    If parent_id is None, it is written as 0.
    :param trace_id: id of the trace
    :param span_id: id of this span
    :param parent_id: id of the parent span, or None for a root span
    :param flags: bit flags, such as sampled and debug
    """
    parent_id = parent_id or 0
    return '{:x}:{:x}:{:x}:{:x}'.format(trace_id, span_id, parent_id, flags)

def span_context_from_string(value):
    """
    Decode span ID from a string into a TraceContext.
    Raises SpanContextCorruptedException if the string value is malformed.
    :param value: formatted {trace_id}:{span_id}:{parent_id}:{flags}
    """
    if type(value) is list and len(value) > 0:
        # sometimes headers are presented as arrays of values
        if len(value) > 1:
            raise SpanContextCorruptedException(
                'trace context must be a string or array of 1: "%s"' % value)
        value = value[0]
    if not isinstance(value, (str,)):
        raise SpanContextCorruptedException(
            'trace context not a string "%s"' % value)
    parts = value.split(':')
    if len(parts) != 4:
        raise SpanContextCorruptedException(
            'malformed trace context "%s"' % value)
    try:
        trace_id = int(parts[0], 16)
        span_id = int(parts[1], 16)
        parent_id = int(parts[2], 16)
        flags = int(parts[3], 16)
        if trace_id < 1 or span_id < 1 or parent_id < 0 or flags < 0:
            raise SpanContextCorruptedException(
                'malformed trace context "%s"' % value)
        if parent_id == 0:
            parent_id = None
        return trace_id, span_id, parent_id, flags
    except ValueError as e:
        raise SpanContextCorruptedException(
            'malformed trace context "%s": %s' % (value, e))

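A round-trip sketch for the two helpers above; the example IDs are illustrative assumptions.

# Hedged round-trip sketch; the IDs below are arbitrary example values.
encoded = span_context_to_string(
    trace_id=0xabc123, span_id=0x1, parent_id=None, flags=1)
assert encoded == 'abc123:1:0:1'
trace_id, span_id, parent_id, flags = span_context_from_string(encoded)
assert (trace_id, span_id, parent_id, flags) == (0xabc123, 0x1, None, 1)
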
def close(self) -> Future:
    """
    Ensure that all spans from the queue are submitted.
    Returns a Future that will be completed once the queue is empty.
    """
    return ioloop_util.submit(self._flush, io_loop=self.io_loop)

def start_span(self,
operation_name: Optional[str] = None,
child_of: Union[None, Span, SpanContext] = None,
references: Union[List[Reference], None, Reference] = None,
tags: Union[dict, None] = None,
start_time: Optional[float] = None,
ignore_active_span: bool = False,
) -> Span:
"""
Start and return a new Span representing a unit of work.
:param operation_name: name of the operation represented by the new
span from the perspective of the current service.
:param child_of: shortcut for 'child_of' reference
:param references: (optional) either a single Reference object or a
list of Reference objects that identify one or more parent
SpanContexts. (See the opentracing.Reference documentation for detail)
:param tags: optional dictionary of Span Tags. The caller gives up
ownership of that dictionary, because the Tracer may use it as-is
to avoid extra data copying.
:param start_time: an explicit Span start time as a unix timestamp per
time.time()
:param ignore_active_span: an explicit flag that ignores the current
active :class:`Scope` and creates a root :class:`Span`
:return: Returns an already-started Span instance.
"""
parent = child_of
if self.active_span is not None \
and not ignore_active_span \
and not parent:
parent = self.active_span
# allow Span to be passed as reference, not just SpanContext
if isinstance(parent, Span):
parent = parent.context
valid_references = None
if references:
valid_references = list()
if not isinstance(references, list):
references = [references]
for reference in references:
if reference.referenced_context is not None:
valid_references.append(reference)
# setting first reference as parent
if valid_references and (parent is None or not parent.has_trace):
parent = valid_references[0].referenced_context
rpc_server = bool(tags and tags.get(ext_tags.SPAN_KIND) == ext_tags.SPAN_KIND_RPC_SERVER)
if parent is None or not parent.has_trace:
trace_id = self._random_id(self.max_trace_id_bits)
span_id = self._random_id(constants._max_id_bits)
parent_id = None
flags = 0
baggage = None
if parent is None:
sampled, sampler_tags = \
self.sampler.is_sampled(trace_id, operation_name or '')
if sampled:
flags = SAMPLED_FLAG
tags = tags or {}
for k, v in sampler_tags.items():
tags[k] = v
elif parent.debug_id and self.is_debug_allowed(operation_name):
flags = SAMPLED_FLAG | DEBUG_FLAG
tags = tags or {}
tags[self.debug_id_header] = parent.debug_id
if parent and parent.baggage:
baggage = dict(parent.baggage) # TODO do we need to clone?
else:
trace_id = parent.trace_id
if rpc_server and self.one_span_per_rpc:
# Zipkin-style one-span-per-RPC
span_id = parent.span_id
parent_id = parent.parent_id
else:
span_id = self._random_id(constants._max_id_bits)
parent_id = parent.span_id
flags = parent.flags
baggage = dict(parent.baggage) # TODO do we need to clone?
span_ctx = SpanContext(trace_id=trace_id, span_id=span_id,
parent_id=parent_id, flags=flags,
baggage=baggage)
span = Span(context=span_ctx, tracer=self,
operation_name=operation_name or '',
tags=tags, start_time=start_time, references=valid_references)
self._emit_span_metrics(span=span, join=rpc_server)
return span | def start_span(self,
operation_name: Optional[str] = None,
child_of: Union[None, Span, SpanContext] = None,
references: Union[List[Reference], None, Reference] = None,
tags: Union[dict, None] = None,
start_time: Optional[float] = None,
ignore_active_span: bool = False,
) -> Span:
"""
Start and return a new Span representing a unit of work.
:param operation_name: name of the operation represented by the new
span from the perspective of the current service.
:param child_of: shortcut for 'child_of' reference
:param references: (optional) either a single Reference object or a
list of Reference objects that identify one or more parent
SpanContexts. (See the opentracing.Reference documentation for detail)
:param tags: optional dictionary of Span Tags. The caller gives up
ownership of that dictionary, because the Tracer may use it as-is
to avoid extra data copying.
:param start_time: an explicit Span start time as a unix timestamp per
time.time()
:param ignore_active_span: an explicit flag that ignores the current
active :class:`Scope` and creates a root :class:`Span`
:return: Returns an already-started Span instance.
"""
parent = child_of
if self.active_span is not None \
and not ignore_active_span \
and not parent:
parent = self.active_span
# allow Span to be passed as reference, not just SpanContext
if isinstance(parent, Span):
parent = parent.context
valid_references = None
if references:
valid_references = list()
if not isinstance(references, list):
references = [references]
for reference in references:
if reference.referenced_context is not None:
valid_references.append(reference)
# setting first reference as parent
if valid_references and (parent is None or not parent.has_trace):
parent = valid_references[0].referenced_context
rpc_server = bool(tags and tags.get(ext_tags.SPAN_KIND) == ext_tags.SPAN_KIND_RPC_SERVER)
if parent is None or not parent.has_trace:
trace_id = self._random_id(self.max_trace_id_bits)
span_id = self._random_id(constants._max_id_bits)
parent_id = None
flags = 0
baggage = None
if parent is None:
sampled, sampler_tags = \
self.sampler.is_sampled(trace_id, operation_name or '')
if sampled:
flags = SAMPLED_FLAG
tags = tags or {}
for k, v in sampler_tags.items():
tags[k] = v
elif parent.debug_id and self.is_debug_allowed(operation_name):
flags = SAMPLED_FLAG | DEBUG_FLAG
tags = tags or {}
tags[self.debug_id_header] = parent.debug_id
if parent and parent.baggage:
baggage = dict(parent.baggage) # TODO do we need to clone?
else:
trace_id = parent.trace_id
if rpc_server and self.one_span_per_rpc:
# Zipkin-style one-span-per-RPC
span_id = parent.span_id
parent_id = parent.parent_id
else:
span_id = self._random_id(constants._max_id_bits)
parent_id = parent.span_id
flags = parent.flags
baggage = dict(parent.baggage) # TODO do we need to clone?
span_ctx = SpanContext(trace_id=trace_id, span_id=span_id,
parent_id=parent_id, flags=flags,
baggage=baggage)
span = Span(context=span_ctx, tracer=self,
operation_name=operation_name or '',
tags=tags, start_time=start_time, references=valid_references)
self._emit_span_metrics(span=span, join=rpc_server)
return span |
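A hedged usage sketch of start_span: `tracer` is assumed to be an already-configured instance of this Tracer, and the operation names and tags are purely illustrative.

parent = tracer.start_span('fetch-user', tags={'component': 'http-client'})
child = tracer.start_span('parse-response', child_of=parent.context)
child.finish()   # spans must be finished before they are reported
parent.finish()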
Python | def close(self) -> Future:
"""
Perform a clean shutdown of the tracer, flushing any traces that
may be buffered in memory.
:return: Returns a tornado.concurrent.Future that indicates if the
flush has been completed.
"""
self.sampler.close()
return self.reporter.close() | def close(self) -> Future:
"""
Perform a clean shutdown of the tracer, flushing any traces that
may be buffered in memory.
:return: Returns a tornado.concurrent.Future that indicates if the
flush has been completed.
"""
self.sampler.close()
return self.reporter.close() |
Python | def _deriveLogin(self, email, firstName, lastName, userName=None):
"""
Attempt to automatically create a login name from existing user
information from OAuth2 providers. Attempts to generate it from the
        username on the provider, the email address, or first and last name. If
        no unique login can be generated, an Exception is raised.
:param email: The email address.
:type email: str
"""
# Note, the user's OAuth2 ID should never be used to form a login name,
# as many OAuth2 services consider that to be private data
for login in self._generateLogins(email, firstName, lastName, userName):
login = login.lower()
if self._testLogin(login):
return login
raise Exception('Could not generate a unique login name for %s (%s %s)'
% (email, firstName, lastName)) | def _deriveLogin(self, email, firstName, lastName, userName=None):
"""
Attempt to automatically create a login name from existing user
information from OAuth2 providers. Attempts to generate it from the
        username on the provider, the email address, or first and last name. If
        no unique login can be generated, an Exception is raised.
:param email: The email address.
:type email: str
"""
# Note, the user's OAuth2 ID should never be used to form a login name,
# as many OAuth2 services consider that to be private data
for login in self._generateLogins(email, firstName, lastName, userName):
login = login.lower()
if self._testLogin(login):
return login
raise Exception('Could not generate a unique login name for %s (%s %s)'
% (email, firstName, lastName)) |
Python | def _generateLogins(self, email, firstName, lastName, userName=None):
"""
Generate a series of reasonable login names for a new user based on
their basic information sent to us by the provider.
"""
# If they have a username on the other service, try that
if userName:
yield userName
            userName = re.sub(r'[\W_]+', '', userName)
yield userName
for i in range(1, 6):
yield '%s%d' % (userName, i)
# Next try to use the prefix from their email address
prefix = email.split('@')[0]
yield prefix
        yield re.sub(r'[\W_]+', '', prefix)
# Finally try to use their first and last name
yield '%s%s' % (firstName, lastName)
for i in range(1, 6):
yield '%s%s%d' % (firstName, lastName, i) | def _generateLogins(self, email, firstName, lastName, userName=None):
"""
Generate a series of reasonable login names for a new user based on
their basic information sent to us by the provider.
"""
# If they have a username on the other service, try that
if userName:
yield userName
            userName = re.sub(r'[\W_]+', '', userName)
yield userName
for i in range(1, 6):
yield '%s%d' % (userName, i)
# Next try to use the prefix from their email address
prefix = email.split('@')[0]
yield prefix
        yield re.sub(r'[\W_]+', '', prefix)
# Finally try to use their first and last name
yield '%s%s' % (firstName, lastName)
for i in range(1, 6):
yield '%s%s%d' % (firstName, lastName, i) |
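To make the candidate order concrete, here is a trimmed standalone re-implementation of the generator with hypothetical inputs; it mirrors the logic above instead of calling the method.

import re

def candidates(email, first, last, username=None):
    if username:
        yield username
        username = re.sub(r'[\W_]+', '', username)
        yield username
        for i in range(1, 6):
            yield '%s%d' % (username, i)
    prefix = email.split('@')[0]
    yield prefix
    yield re.sub(r'[\W_]+', '', prefix)
    yield '%s%s' % (first, last)
    for i in range(1, 6):
        yield '%s%s%d' % (first, last, i)

# -> ['j_doe', 'jdoe', 'jdoe1', ..., 'jdoe5', 'j.doe', 'jdoe', 'JaneDoe', 'JaneDoe1', ...]
print(list(candidates('j.doe@example.com', 'Jane', 'Doe', 'j_doe')))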
Python | def validator(
*fields: str,
pre: bool = False,
each_item: bool = False,
always: bool = False,
check_fields: bool = False,
whole: bool = None,
allow_reuse: bool = False,
) -> Callable[[AnyCallable], classmethod]:
"""Shortcut to Pydantic `@validator` decorator with check_fields=False."""
return pyd_validator(
*fields,
pre=pre,
each_item=each_item,
always=always,
check_fields=check_fields,
whole=whole,
allow_reuse=allow_reuse,
) | def validator(
*fields: str,
pre: bool = False,
each_item: bool = False,
always: bool = False,
check_fields: bool = False,
whole: bool = None,
allow_reuse: bool = False,
) -> Callable[[AnyCallable], classmethod]:
"""Shortcut to Pydantic `@validator` decorator with check_fields=False."""
return pyd_validator(
*fields,
pre=pre,
each_item=each_item,
always=always,
check_fields=check_fields,
whole=whole,
allow_reuse=allow_reuse,
) |
Python | def add_fields(cls, **field_definitions: Tuple[str, Any]) -> None:
"""Add fields to the model.
        Adapted from:
https://github.com/samuelcolvin/pydantic/issues/1937#issuecomment-695313040
"""
new_fields: Dict[str, ModelField] = {}
new_annotations: Dict[str, Optional[type]] = {}
validators = None
for f_name, f_def in field_definitions.items():
            f_annotation, f_value = f_def
            new_annotations[f_name] = f_annotation  # record the annotation so the update below is not a no-op
if cls.__vg__:
validators = cls.__vg__.get_validators(f_name)
new_fields[f_name] = ModelField.infer(
name=f_name,
value=f_value,
annotation=f_annotation,
class_validators=validators,
config=cls.__config__,
)
cls.__fields__.update(new_fields)
cls.__annotations__.update(new_annotations) | def add_fields(cls, **field_definitions: Tuple[str, Any]) -> None:
"""Add fields to the model.
        Adapted from:
https://github.com/samuelcolvin/pydantic/issues/1937#issuecomment-695313040
"""
new_fields: Dict[str, ModelField] = {}
new_annotations: Dict[str, Optional[type]] = {}
validators = None
for f_name, f_def in field_definitions.items():
            f_annotation, f_value = f_def
            new_annotations[f_name] = f_annotation  # record the annotation so the update below is not a no-op
if cls.__vg__:
validators = cls.__vg__.get_validators(f_name)
new_fields[f_name] = ModelField.infer(
name=f_name,
value=f_value,
annotation=f_annotation,
class_validators=validators,
config=cls.__config__,
)
cls.__fields__.update(new_fields)
cls.__annotations__.update(new_annotations) |
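A hedged usage sketch of add_fields (pydantic v1 semantics assumed; the model and field names are hypothetical). Field definitions are (annotation, default) tuples, and Ellipsis marks a field as required:

SomeModel.add_fields(
    age=(Optional[int], None),   # optional field defaulting to None
    name=(str, ...),             # required field
)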
Python | def register_scalar(self, name: str, typing: Any) -> None:
"""Register a custom scalar to use when binding pydantic models.
Args:
name (str): Scalar name, must match the one the schema
typing (Any): Python typing for the scalar
"""
self._type_map[name] = typing | def register_scalar(self, name: str, typing: Any) -> None:
"""Register a custom scalar to use when binding pydantic models.
Args:
name (str): Scalar name, must match the one the schema
typing (Any): Python typing for the scalar
"""
self._type_map[name] = typing |
Python | def resolve_field_typing(
self, gql_field, schema: GraphQLSchema
) -> Tuple[Any, Optional[Any]]:
"""Find out the proper typing to use for a given GraphQL field.
Args:
            gql_field (Any): The GraphQL field for which to find the typing
schema (GraphQLSchema): GraphQL schema
Raises:
PydanticBindError: Raised when the GraphQL field type is a custom type for which
no pydantic model has been defined.
Returns:
Tuple[Any, Optional[Any]]: A tuple `(typing, default_value)` to pass to `add_fields`.
"""
field_type: Any = None
default_value = None
if isinstance(gql_field, (GraphQLObjectType, GraphQLInputObjectType)):
sub_model: Optional[Type[GraphQLModel]] = self.models.get(gql_field.name)
if not sub_model:
raise PydanticBindError(
                f'There is no pydantic model bound to "{gql_field.name}" GraphQL type'
)
if not sub_model.__initialized__:
self.process_model(sub_model, schema)
field_type = sub_model
elif isinstance(gql_field, GraphQLNonNull):
field_type, _ = self.resolve_field_typing(gql_field.of_type, schema)
            # Ellipsis as default value in the pydantic model marks the field as required
default_value = ...
elif isinstance(gql_field, GraphQLScalarType):
field_type = self._type_map.get(gql_field.name)
elif isinstance(gql_field, GraphQLList):
of_type, default_of_type = self.resolve_field_typing(
gql_field.of_type, schema
)
if default_of_type is None:
of_type = Optional[of_type]
field_type = List[of_type] # type: ignore
return field_type, default_value | def resolve_field_typing(
self, gql_field, schema: GraphQLSchema
) -> Tuple[Any, Optional[Any]]:
"""Find out the proper typing to use for a given GraphQL field.
Args:
            gql_field (Any): The GraphQL field for which to find the typing
schema (GraphQLSchema): GraphQL schema
Raises:
PydanticBindError: Raised when the GraphQL field type is a custom type for which
no pydantic model has been defined.
Returns:
Tuple[Any, Optional[Any]]: A tuple `(typing, default_value)` to pass to `add_fields`.
"""
field_type: Any = None
default_value = None
if isinstance(gql_field, (GraphQLObjectType, GraphQLInputObjectType)):
sub_model: Optional[Type[GraphQLModel]] = self.models.get(gql_field.name)
if not sub_model:
raise PydanticBindError(
                f'There is no pydantic model bound to "{gql_field.name}" GraphQL type'
)
if not sub_model.__initialized__:
self.process_model(sub_model, schema)
field_type = sub_model
elif isinstance(gql_field, GraphQLNonNull):
field_type, _ = self.resolve_field_typing(gql_field.of_type, schema)
            # Ellipsis as default value in the pydantic model marks the field as required
default_value = ...
elif isinstance(gql_field, GraphQLScalarType):
field_type = self._type_map.get(gql_field.name)
elif isinstance(gql_field, GraphQLList):
of_type, default_of_type = self.resolve_field_typing(
gql_field.of_type, schema
)
if default_of_type is None:
of_type = Optional[of_type]
field_type = List[of_type] # type: ignore
return field_type, default_value |
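To illustrate the NonNull and scalar branches in isolation, here is a hedged standalone sketch using graphql-core types directly; it is a trimmed re-implementation with a hypothetical scalar registry, not this class:

from typing import Any, Optional, Tuple
from graphql import GraphQLInt, GraphQLNonNull, GraphQLScalarType, GraphQLString

TYPE_MAP = {'String': str, 'Int': int}  # hypothetical scalar registry

def resolve(gql_type) -> Tuple[Any, Optional[Any]]:
    if isinstance(gql_type, GraphQLNonNull):
        inner, _ = resolve(gql_type.of_type)
        return inner, ...                      # Ellipsis marks the field required
    if isinstance(gql_type, GraphQLScalarType):
        return TYPE_MAP[gql_type.name], None   # scalars stay optional by default
    raise NotImplementedError(type(gql_type))

assert resolve(GraphQLNonNull(GraphQLString)) == (str, ...)
assert resolve(GraphQLInt) == (int, None)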
Python | def resolve_model_fields(
self, model: Type[GraphQLModel], gql_type: Any, schema: GraphQLSchema
) -> Dict[str, Any]:
"""Translate fields from a GraphQL type into pydantic ones.
Args:
gql_type (Any): GraphQL type on which to translate fields
schema (GraphQLSchema): GraphQL schema
Raises:
            PydanticBindError: Raised when a field can't be translated
Returns:
Dict[str, Any]: A dict with pydantic field names as keys and pydantic fields as values.
All field names are converted to snake_case
"""
pyd_fields = {}
input_fields: List[str] = gql_type.fields.keys()
if model.GraphQL.include is not None:
input_fields = model.GraphQL.include
if model.GraphQL.exclude:
raise PydanticBindError(
"You cannot use include and exclude on a GraphQLModel"
)
for name in input_fields:
if name not in model.GraphQL.exclude:
try:
field = gql_type.fields[name]
except KeyError as error:
raise PydanticBindError(
f'field "{name}" does not exist on type {gql_type.name}'
) from error
field_type, default_value = self.resolve_field_typing(
field.type, schema
)
if model.GraphQL.fields and name in model.GraphQL.fields:
field_type = model.GraphQL.fields[name]
if field_type is None:
raise PydanticBindError(
f'Don\'t know how to map "{name}" field from GraphQL type {gql_type.name}'
)
if default_value is None:
field_type = Optional[field_type]
# Convert names to snake case
pyd_fields[convert_camel_case_to_snake(name)] = (
field_type,
default_value,
)
return pyd_fields | def resolve_model_fields(
self, model: Type[GraphQLModel], gql_type: Any, schema: GraphQLSchema
) -> Dict[str, Any]:
"""Translate fields from a GraphQL type into pydantic ones.
Args:
gql_type (Any): GraphQL type on which to translate fields
schema (GraphQLSchema): GraphQL schema
Raises:
PydanticBindError: Raised when a fields can't be translated
Returns:
Dict[str, Any]: A dict with pydantic field names as keys and pydantic fields as values.
All field names are converted to snake_case
"""
pyd_fields = {}
input_fields: List[str] = gql_type.fields.keys()
if model.GraphQL.include is not None:
input_fields = model.GraphQL.include
if model.GraphQL.exclude:
raise PydanticBindError(
"You cannot use include and exclude on a GraphQLModel"
)
for name in input_fields:
if name not in model.GraphQL.exclude:
try:
field = gql_type.fields[name]
except KeyError as error:
raise PydanticBindError(
f'field "{name}" does not exist on type {gql_type.name}'
) from error
field_type, default_value = self.resolve_field_typing(
field.type, schema
)
if model.GraphQL.fields and name in model.GraphQL.fields:
field_type = model.GraphQL.fields[name]
if field_type is None:
raise PydanticBindError(
f'Don\'t know how to map "{name}" field from GraphQL type {gql_type.name}'
)
if default_value is None:
field_type = Optional[field_type]
# Convert names to snake case
pyd_fields[convert_camel_case_to_snake(name)] = (
field_type,
default_value,
)
return pyd_fields |
Python | def process_model(self, model: Type[GraphQLModel], schema: GraphQLSchema) -> None:
"""Add fields to the given pydantic model.
Args:
model (Type[GraphQLModel]): The pydantic model on which to add fields
schema (GraphQLSchema): GraphQL schema
Raises:
PydanticBindError: Raised if `gql_type` is None or
if no corresponding GraphQL type has been found.
"""
if not model.GraphQL.gql_type:
raise PydanticBindError(
f"Can't find gql_type on pydantic model {model.__name__}."
" You must define gql_type attribute in the GraphQL inner class"
" when subclassing GraphQLToPydantic"
)
type_ = schema.type_map.get(model.GraphQL.gql_type)
if not type_:
            raise PydanticBindError(
                f"The GraphQL type {model.GraphQL.gql_type} does not exist"
            )
if not model.__initialized__:
fields = self.resolve_model_fields(model, type_, schema)
model.__initialized__ = True
model.add_fields(**fields) | def process_model(self, model: Type[GraphQLModel], schema: GraphQLSchema) -> None:
"""Add fields to the given pydantic model.
Args:
model (Type[GraphQLModel]): The pydantic model on which to add fields
schema (GraphQLSchema): GraphQL schema
Raises:
PydanticBindError: Raised if `gql_type` is None or
if no corresponding GraphQL type has been found.
"""
if not model.GraphQL.gql_type:
raise PydanticBindError(
f"Can't find gql_type on pydantic model {model.__name__}."
" You must define gql_type attribute in the GraphQL inner class"
" when subclassing GraphQLToPydantic"
)
type_ = schema.type_map.get(model.GraphQL.gql_type)
if not type_:
            raise PydanticBindError(
                f"The GraphQL type {model.GraphQL.gql_type} does not exist"
            )
if not model.__initialized__:
fields = self.resolve_model_fields(model, type_, schema)
model.__initialized__ = True
model.add_fields(**fields) |
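A hedged sketch of declaring such a binding; the model name and GraphQL type are hypothetical, and GraphQLModel with its GraphQL inner-class convention comes from this codebase:

class UserModel(GraphQLModel):
    class GraphQL:
        gql_type = "User"   # must match a type name in the schema's type_map

# a resolver instance would then call: resolver.process_model(UserModel, schema)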
Python | async def csrf(request): # pylint: disable=unused-argument
"""CSRF route.
    Set the CSRF cookie and return a `JSONResponse` with the token.
    We need this REST endpoint to protect against CSRF because all GraphQL
    queries use the POST method, so a separate safe GET endpoint is needed to
    transmit the token.
"""
token = get_new_token()
response = JSONResponse({"csrftoken": token})
response.set_cookie(
settings.CSRF_COOKIE_NAME,
token,
httponly=settings.CSRF_COOKIE_HTTPONLY,
secure=settings.CSRF_COOKIE_SECURE,
)
return response | async def csrf(request): # pylint: disable=unused-argument
"""CSRF route.
    Set the CSRF cookie and return a `JSONResponse` with the token.
    We need this REST endpoint to protect against CSRF because all GraphQL
    queries use the POST method, so a separate safe GET endpoint is needed to
    transmit the token.
"""
token = get_new_token()
response = JSONResponse({"csrftoken": token})
response.set_cookie(
settings.CSRF_COOKIE_NAME,
token,
httponly=settings.CSRF_COOKIE_HTTPONLY,
secure=settings.CSRF_COOKIE_SECURE,
)
return response |
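A hedged client-side sketch of the resulting flow, with httpx used for illustration; the URLs and the X-CSRFToken header name are assumptions, not confirmed by this codebase:

import httpx

resp = httpx.get("http://localhost:8000/csrf")
token = resp.json()["csrftoken"]
httpx.post(
    "http://localhost:8000/graphql",
    json={"query": "{ __typename }"},
    headers={"X-CSRFToken": token},   # hypothetical header name
    cookies=resp.cookies,             # echo the CSRF cookie back
)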
Python | def auth_user_tablename() -> str:
"""Get the auth table name from settings or generate it."""
return settings.AUTH_USER_MODEL_TABLENAME or get_tablename(
settings.AUTH_USER_MODEL.rsplit(".", 3)[-3],
settings.AUTH_USER_MODEL.split(".")[-1],
) | def auth_user_tablename() -> str:
"""Get the auth table name from settings or generate it."""
return settings.AUTH_USER_MODEL_TABLENAME or get_tablename(
settings.AUTH_USER_MODEL.rsplit(".", 3)[-3],
settings.AUTH_USER_MODEL.split(".")[-1],
) |
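A worked example of the string handling above; get_tablename itself comes from this codebase, and the model path is hypothetical:

model_path = "myapp.models.User"            # hypothetical AUTH_USER_MODEL value
app_label = model_path.rsplit(".", 3)[-3]   # -> "myapp"
model_name = model_path.split(".")[-1]      # -> "User"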
Python | async def _get_object(
self,
obj_class: Model,
key: str,
obj: Type[Model] = None,
identifier: str = None,
) -> Model:
"""Lazy getter for model objects."""
obj_ = None
if obj:
obj_ = obj
elif identifier:
obj_ = await obj_class.query.where(
getattr(obj_class, key) == identifier
).gino.first()
if not obj_:
raise DoesNotExist(self)
if not obj_:
raise Exception(
f"You must provide either a {obj_class.__name__} object or a {key}"
)
return obj_ | async def _get_object(
self,
obj_class: Model,
key: str,
obj: Type[Model] = None,
identifier: str = None,
) -> Model:
"""Lazy getter for model objects."""
obj_ = None
if obj:
obj_ = obj
elif identifier:
obj_ = await obj_class.query.where(
getattr(obj_class, key) == identifier
).gino.first()
if not obj_:
raise DoesNotExist(self)
if not obj_:
raise Exception(
f"You must provide either a {obj_class.__name__} object or a {key}"
)
return obj_ |
Python | async def add_role(
        self, role: Optional[Role] = None, name: Optional[str] = None
):
"""Add a role to the user."""
role_ = await self._get_object(Role, "name", role, name)
await UserRole.create(user=self.id, role=role_.id) | async def add_role(
        self, role: Optional[Role] = None, name: Optional[str] = None
):
"""Add a role to the user."""
role_ = await self._get_object(Role, "name", role, name)
await UserRole.create(user=self.id, role=role_.id) |
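A hedged usage sketch, to be run inside an async context; the user instance and role name are hypothetical:

await user.add_role(name="admin")      # look the role up by name
await user.add_role(role=admin_role)   # or pass a Role instance directly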
Python | async def role_perms(self) -> List[Role]:
"""Load user roles and permissions."""
query = UserRole.join(Role).join(RolePermission).join(Permission).select()
return (
await query.where(UserRole.user == self.id)
.gino.load(Role.distinct(Role.id).load(add_permission=Permission.load()))
.query.gino.all()
) | async def role_perms(self) -> List[Role]:
"""Load user roles and permissions."""
query = UserRole.join(Role).join(RolePermission).join(Permission).select()
return (
await query.where(UserRole.user == self.id)
.gino.load(Role.distinct(Role.id).load(add_permission=Permission.load()))
.query.gino.all()
) |
Python | def error_formatter(error: GraphQLError, debug: bool = False):
"""Replace Ariadne default error formatter.
Args:
error (GraphQLError): The GraphQL error
debug (bool, optional): True if ASGI app has been
instantiated with debug=True. Defaults to False.
Returns:
        dict: The formatted GraphQL error.
"""
if debug:
# If debug is enabled, reuse Ariadne's formatting logic
formatted = format_error(error, debug)
else:
formatted = error.formatted # pragma: no cover
return formatted | def error_formatter(error: GraphQLError, debug: bool = False):
"""Replace Ariadne default error formatter.
Args:
error (GraphQLError): The GraphQL error
debug (bool, optional): True if ASGI app has been
instantiated with debug=True. Defaults to False.
Returns:
        dict: The formatted GraphQL error.
"""
if debug:
# If debug is enabled, reuse Ariadne's formatting logic
formatted = format_error(error, debug)
else:
formatted = error.formatted # pragma: no cover
return formatted |
Python | def assert_data_in_response(cls, response: dict, data: Dict[str, Any]):
        Assert that each key from `data` that is present in the response maps
        to the corresponding value.
Args:
response (dict): Response data to check
data (Dict[str, Any]): Data that should be present in `response`
"""
for key, value in data.items():
if key in response:
assert response[key] == value | def assert_data_in_response(cls, response: dict, data: Dict[str, Any]):
        Assert that each key from `data` that is present in the response maps
        to the corresponding value.
Args:
response (dict): Response data to check
data (Dict[str, Any]): Data that should be present in `response`
"""
for key, value in data.items():
if key in response:
assert response[key] == value |
Python | async def _get_scopes(user: user_model) -> list:
"""Return a list of user role names."""
role_perms = await user.role_perms()
# Cache role permissions if they are not there
for role in role_perms:
cached_role = await cache.get(role.name)
if not cached_role:
permissions = [p.to_dict() for p in role.permissions]
await cache.set(role.name, permissions)
return [role.name for role in role_perms] | async def _get_scopes(user: user_model) -> list:
"""Return a list of user role names."""
role_perms = await user.role_perms()
# Cache role permissions if they are not there
for role in role_perms:
cached_role = await cache.get(role.name)
if not cached_role:
permissions = [p.to_dict() for p in role.permissions]
await cache.set(role.name, permissions)
return [role.name for role in role_perms] |
Python | def run_migrations_offline(metadata, config): # pragma: no cover
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(
url=url,
target_metadata=metadata,
literal_binds=True,
dialect_opts={"paramstyle": "named"},
)
with context.begin_transaction():
context.run_migrations() | def run_migrations_offline(metadata, config): # pragma: no cover
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(
url=url,
target_metadata=metadata,
literal_binds=True,
dialect_opts={"paramstyle": "named"},
)
with context.begin_transaction():
context.run_migrations() |
Python | def run_migrations_online(metadata, config): # pragma: no cover
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = engine_from_config(
config.get_section(config.config_ini_section),
prefix="sqlalchemy.",
poolclass=pool.NullPool,
)
with connectable.connect() as connection:
context.configure(connection=connection, target_metadata=metadata)
with context.begin_transaction():
context.run_migrations() | def run_migrations_online(metadata, config): # pragma: no cover
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = engine_from_config(
config.get_section(config.config_ini_section),
prefix="sqlalchemy.",
poolclass=pool.NullPool,
)
with connectable.connect() as connection:
context.configure(connection=connection, target_metadata=metadata)
with context.begin_transaction():
context.run_migrations() |
Python | def verify_expected(expected):
""" binds the expected value to the acoustics_verify methods """
def acoustics_verify(claw):
from clawpack.pyclaw.util import check_diff
import numpy as np
# tests are done across the entire domain of q normally
q0=claw.frames[0].state.get_q_global()
qfinal=claw.frames[claw.num_output_times].state.get_q_global()
# and q_global is only returned on process 0
        if q0 is not None and qfinal is not None:
q0 = q0.reshape([-1])
qfinal = qfinal.reshape([-1])
dx=claw.solution.domain.grid.delta[0]
test = dx*np.sum(np.abs(qfinal-q0))
return check_diff(expected, test, abstol=1e-4)
else:
return
    return acoustics_verify | def verify_expected(expected):
    """ binds the expected value to the acoustics_verify method """
def acoustics_verify(claw):
from clawpack.pyclaw.util import check_diff
import numpy as np
# tests are done across the entire domain of q normally
q0=claw.frames[0].state.get_q_global()
qfinal=claw.frames[claw.num_output_times].state.get_q_global()
# and q_global is only returned on process 0
        if q0 is not None and qfinal is not None:
q0 = q0.reshape([-1])
qfinal = qfinal.reshape([-1])
dx=claw.solution.domain.grid.delta[0]
test = dx*np.sum(np.abs(qfinal-q0))
return check_diff(expected, test, abstol=1e-4)
else:
return
return acoustics_verify |
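Since verify_expected returns a closure, the tolerance target is bound once and the resulting callable can be handed to a test harness; a hedged sketch with a hypothetical expected value and controller:

acoustics_verify = verify_expected(0.001)   # hypothetical expected value
result = acoustics_verify(claw)             # claw: a pyclaw.Controller after run()
# by the usual pyclaw test convention (an assumption here), a None result is a pass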
Python | def advection(kernel_language='Python',iplot=False,htmlplot=False,use_petsc=False,solver_type='classic',outdir='./_output'):
"""
Example python script for solving the 1d advection equation.
"""
import numpy as np
if use_petsc:
import clawpack.petclaw as pyclaw
else:
from clawpack import pyclaw
if solver_type=='sharpclaw':
solver = pyclaw.SharpClawSolver1D()
else:
solver = pyclaw.ClawSolver1D()
solver.kernel_language = kernel_language
from clawpack.riemann import rp_advection
solver.num_waves = rp_advection.num_waves
if solver.kernel_language=='Python':
solver.rp = rp_advection.rp_advection_1d
else:
from clawpack import riemann
solver.rp = riemann.rp1_advection
solver.bc_lower[0] = 2
solver.bc_upper[0] = 2
x = pyclaw.Dimension('x',0.0,1.0,100)
domain = pyclaw.Domain(x)
num_eqn = 1
state = pyclaw.State(domain,num_eqn)
state.problem_data['u']=1.
grid = state.grid
xc=grid.x.centers
beta=100; gamma=0; x0=0.75
state.q[0,:] = np.exp(-beta * (xc-x0)**2) * np.cos(gamma * (xc - x0))
claw = pyclaw.Controller()
claw.solution = pyclaw.Solution(state,domain)
claw.solver = solver
claw.outdir = outdir
claw.tfinal =1.0
status = claw.run()
if htmlplot: pyclaw.plot.html_plot(outdir=outdir)
if iplot: pyclaw.plot.interactive_plot(outdir=outdir) | def advection(kernel_language='Python',iplot=False,htmlplot=False,use_petsc=False,solver_type='classic',outdir='./_output'):
"""
Example python script for solving the 1d advection equation.
"""
import numpy as np
if use_petsc:
import clawpack.petclaw as pyclaw
else:
from clawpack import pyclaw
if solver_type=='sharpclaw':
solver = pyclaw.SharpClawSolver1D()
else:
solver = pyclaw.ClawSolver1D()
solver.kernel_language = kernel_language
from clawpack.riemann import rp_advection
solver.num_waves = rp_advection.num_waves
if solver.kernel_language=='Python':
solver.rp = rp_advection.rp_advection_1d
else:
from clawpack import riemann
solver.rp = riemann.rp1_advection
solver.bc_lower[0] = 2
solver.bc_upper[0] = 2
x = pyclaw.Dimension('x',0.0,1.0,100)
domain = pyclaw.Domain(x)
num_eqn = 1
state = pyclaw.State(domain,num_eqn)
state.problem_data['u']=1.
grid = state.grid
xc=grid.x.centers
beta=100; gamma=0; x0=0.75
state.q[0,:] = np.exp(-beta * (xc-x0)**2) * np.cos(gamma * (xc - x0))
claw = pyclaw.Controller()
claw.solution = pyclaw.Solution(state,domain)
claw.solver = solver
claw.outdir = outdir
claw.tfinal =1.0
status = claw.run()
if htmlplot: pyclaw.plot.html_plot(outdir=outdir)
if iplot: pyclaw.plot.interactive_plot(outdir=outdir) |
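Drivers like this are conventionally launched through run_app_from_main in the pyclaw example scripts, which forwards command-line options (e.g. use_petsc=1) to the function; a minimal sketch of that convention:

if __name__=="__main__":
    from clawpack.pyclaw.util import run_app_from_main
    output = run_app_from_main(advection)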
Python | def check_validity(self):
r"""Check that the controller has been properly set up and is ready to run.
Checks validity of the solver
"""
# Check to make sure we have a valid solver to use
if self.solver is None:
raise Exception("No solver set in controller.")
if not isinstance(self.solver,Solver):
raise Exception("Solver is not of correct type.")
if not self.solver.is_valid():
raise Exception("The solver failed to initialize properly.")
# Check to make sure the initial solution is valid
if not self.solution.is_valid():
raise Exception("Initial solution is not valid.")
if not all([state.is_valid() for state in self.solution.states]):
raise Exception("Initial states are not valid.") | def check_validity(self):
r"""Check that the controller has been properly set up and is ready to run.
Checks validity of the solver
"""
# Check to make sure we have a valid solver to use
if self.solver is None:
raise Exception("No solver set in controller.")
if not isinstance(self.solver,Solver):
raise Exception("Solver is not of correct type.")
if not self.solver.is_valid():
raise Exception("The solver failed to initialize properly.")
# Check to make sure the initial solution is valid
if not self.solution.is_valid():
raise Exception("Initial solution is not valid.")
if not all([state.is_valid() for state in self.solution.states]):
raise Exception("Initial states are not valid.") |
Python | def run(self):
r"""
        Convenience routine that will evolve the solution based on the
traditional clawpack output and run parameters.
This function uses the run parameters and solver parameters to evolve
the solution to the end time specified in run_data, outputting at the
appropriate times.
:Input:
None
        :Output:
(dict) - Return a dictionary of the status of the solver.
:Version: 1.0 (2009-05-01)
"""
import numpy as np
frame = FrameCounter()
frame.set_counter(self.start_frame)
if self.keep_copy:
self.frames = []
self.solver.setup(self.solution)
self.solver.dt = self.solver.dt_initial
self.check_validity()
# Write initial gauge values
self.solver.write_gauge_values(self.solution)
# Output styles
if self.output_style == 1:
output_times = np.linspace(self.solution.t,
self.tfinal,self.num_output_times+1)
elif self.output_style == 2:
output_times = self.out_times
elif self.output_style == 3:
output_times = np.ones((self.num_output_times+1))
else:
raise Exception("Invalid output style %s" % self.output_style)
# Output and save initial frame
if self.keep_copy:
self.frames.append(copy.deepcopy(self.solution))
if self.output_format is not None:
if os.path.exists(self.outdir) and self.overwrite==False:
raise Exception("Refusing to overwrite existing output data. \
\nEither delete/move the directory or set controller.overwrite=True.")
if self.compute_p is not None:
self.compute_p(self.solution.state)
self.solution.write(frame,self.outdir_p,
self.output_format,
self.file_prefix_p,
write_aux = False,
options = self.output_options,
write_p = True)
self.solution.write(frame,self.outdir,
self.output_format,
self.output_file_prefix,
self.write_aux_init,
self.output_options)
self.write_F('w')
self.log_info("Solution %s computed for time t=%f" %
(frame,self.solution.t) )
for t in output_times[1:]:
if self.output_style < 3:
status = self.solver.evolve_to_time(self.solution,t)
else:
# Take nstepout steps and output
for n in xrange(self.nstepout):
status = self.solver.evolve_to_time(self.solution)
frame.increment()
if self.keep_copy:
# Save current solution to dictionary with frame as key
self.frames.append(copy.deepcopy(self.solution))
if self.output_format is not None:
if self.compute_p is not None:
self.compute_p(self.solution.state)
self.solution.write(frame,self.outdir_p,
self.output_format,
self.file_prefix_p,
write_aux = False,
options = self.output_options,
write_p = True)
self.solution.write(frame,self.outdir,
self.output_format,
self.output_file_prefix,
self.write_aux_always,
self.output_options)
self.write_F()
self.log_info("Solution %s computed for time t=%f"
% (frame,self.solution.t))
for gfile in self.solution.state.grid.gauge_files:
gfile.flush()
self.solver.teardown()
for gfile in self.solution.state.grid.gauge_files: gfile.close()
# Return the current status of the solver
return status | def run(self):
r"""
        Convenience routine that will evolve the solution based on the
traditional clawpack output and run parameters.
This function uses the run parameters and solver parameters to evolve
the solution to the end time specified in run_data, outputting at the
appropriate times.
:Input:
None
        :Output:
(dict) - Return a dictionary of the status of the solver.
:Version: 1.0 (2009-05-01)
"""
import numpy as np
frame = FrameCounter()
frame.set_counter(self.start_frame)
if self.keep_copy:
self.frames = []
self.solver.setup(self.solution)
self.solver.dt = self.solver.dt_initial
self.check_validity()
# Write initial gauge values
self.solver.write_gauge_values(self.solution)
# Output styles
if self.output_style == 1:
output_times = np.linspace(self.solution.t,
self.tfinal,self.num_output_times+1)
elif self.output_style == 2:
output_times = self.out_times
elif self.output_style == 3:
output_times = np.ones((self.num_output_times+1))
else:
raise Exception("Invalid output style %s" % self.output_style)
# Output and save initial frame
if self.keep_copy:
self.frames.append(copy.deepcopy(self.solution))
if self.output_format is not None:
if os.path.exists(self.outdir) and self.overwrite==False:
raise Exception("Refusing to overwrite existing output data. \
\nEither delete/move the directory or set controller.overwrite=True.")
if self.compute_p is not None:
self.compute_p(self.solution.state)
self.solution.write(frame,self.outdir_p,
self.output_format,
self.file_prefix_p,
write_aux = False,
options = self.output_options,
write_p = True)
self.solution.write(frame,self.outdir,
self.output_format,
self.output_file_prefix,
self.write_aux_init,
self.output_options)
self.write_F('w')
self.log_info("Solution %s computed for time t=%f" %
(frame,self.solution.t) )
for t in output_times[1:]:
if self.output_style < 3:
status = self.solver.evolve_to_time(self.solution,t)
else:
# Take nstepout steps and output
for n in xrange(self.nstepout):
status = self.solver.evolve_to_time(self.solution)
frame.increment()
if self.keep_copy:
# Save current solution to dictionary with frame as key
self.frames.append(copy.deepcopy(self.solution))
if self.output_format is not None:
if self.compute_p is not None:
self.compute_p(self.solution.state)
self.solution.write(frame,self.outdir_p,
self.output_format,
self.file_prefix_p,
write_aux = False,
options = self.output_options,
write_p = True)
self.solution.write(frame,self.outdir,
self.output_format,
self.output_file_prefix,
self.write_aux_always,
self.output_options)
self.write_F()
self.log_info("Solution %s computed for time t=%f"
% (frame,self.solution.t))
for gfile in self.solution.state.grid.gauge_files:
gfile.flush()
self.solver.teardown()
for gfile in self.solution.state.grid.gauge_files: gfile.close()
# Return the current status of the solver
return status |
Python | def _create_DA(self):
r"""Returns a PETSc DA and associated global Vec.
Note that no local vector is returned.
"""
from petsc4py import PETSc
if hasattr(PETSc.DA, 'PeriodicType'):
if self.num_dim == 1:
periodic_type = PETSc.DA.PeriodicType.X
elif self.num_dim == 2:
periodic_type = PETSc.DA.PeriodicType.XY
elif self.num_dim == 3:
periodic_type = PETSc.DA.PeriodicType.XYZ
else:
raise Exception("Invalid number of dimensions")
DA = PETSc.DA().create(dim=self.num_dim,
dof=1,
sizes=self.num_cells_global,
periodic_type = periodic_type,
stencil_width=0,
comm=PETSc.COMM_WORLD)
else:
DA = PETSc.DA().create(dim=self.num_dim,
dof=1,
sizes=self.num_cells_global,
boundary_type = PETSc.DA.BoundaryType.PERIODIC,
stencil_width=0,
comm=PETSc.COMM_WORLD)
        return DA | def _create_DA(self):
        r"""Returns a PETSc DA.
        Note that no local or global vector is returned.
"""
from petsc4py import PETSc
if hasattr(PETSc.DA, 'PeriodicType'):
if self.num_dim == 1:
periodic_type = PETSc.DA.PeriodicType.X
elif self.num_dim == 2:
periodic_type = PETSc.DA.PeriodicType.XY
elif self.num_dim == 3:
periodic_type = PETSc.DA.PeriodicType.XYZ
else:
raise Exception("Invalid number of dimensions")
DA = PETSc.DA().create(dim=self.num_dim,
dof=1,
sizes=self.num_cells_global,
periodic_type = periodic_type,
stencil_width=0,
comm=PETSc.COMM_WORLD)
else:
DA = PETSc.DA().create(dim=self.num_dim,
dof=1,
sizes=self.num_cells_global,
boundary_type = PETSc.DA.BoundaryType.PERIODIC,
stencil_width=0,
comm=PETSc.COMM_WORLD)
return DA |
Python | def advection2D(iplot=False,use_petsc=False,htmlplot=False,outdir='./_output',solver_type='classic'):
"""
Example python script for solving the 2d advection equation.
"""
#===========================================================================
# Import libraries
#===========================================================================
if use_petsc:
import clawpack.petclaw as pyclaw
else:
from clawpack import pyclaw
#===========================================================================
# Setup solver and solver parameters
#===========================================================================
if solver_type=='classic':
solver = pyclaw.ClawSolver2D()
solver.dimensional_split = 1
solver.limiters = pyclaw.limiters.tvd.vanleer
elif solver_type=='sharpclaw':
solver = pyclaw.SharpClawSolver2D()
from clawpack import riemann
solver.rp = riemann.rp2_advection
solver.bc_lower[0] = pyclaw.BC.periodic
solver.bc_upper[0] = pyclaw.BC.periodic
solver.bc_lower[1] = pyclaw.BC.periodic
solver.bc_upper[1] = pyclaw.BC.periodic
solver.num_waves = 1
solver.cfl_max=1.0
solver.cfl_desired = 0.9
#===========================================================================
# Initialize domain, then initialize the solution associated to the domain and
# finally initialize aux array
#===========================================================================
# Domain:
mx=50; my=50
x = pyclaw.Dimension('x',0.0,1.0,mx)
y = pyclaw.Dimension('y',0.0,1.0,my)
domain = pyclaw.Domain([x,y])
num_eqn = 1
state = pyclaw.State(domain,num_eqn)
state.problem_data['u'] = 0.5 # Parameters (global auxiliary variables)
state.problem_data['v'] = 1.0
# Initial solution
# ================
qinit(state) # This function is defined above
#===========================================================================
# Set up controller and controller parameters
#===========================================================================
claw = pyclaw.Controller()
claw.tfinal = 2.0
claw.solution = pyclaw.Solution(state,domain)
claw.solver = solver
claw.outdir = outdir
#===========================================================================
# Solve the problem
#===========================================================================
status = claw.run()
#===========================================================================
# Plot results
#===========================================================================
if htmlplot: pyclaw.plot.html_plot(outdir=outdir)
if iplot: pyclaw.plot.interactive_plot(outdir=outdir) | def advection2D(iplot=False,use_petsc=False,htmlplot=False,outdir='./_output',solver_type='classic'):
"""
Example python script for solving the 2d advection equation.
"""
#===========================================================================
# Import libraries
#===========================================================================
if use_petsc:
import clawpack.petclaw as pyclaw
else:
from clawpack import pyclaw
#===========================================================================
# Setup solver and solver parameters
#===========================================================================
if solver_type=='classic':
solver = pyclaw.ClawSolver2D()
solver.dimensional_split = 1
solver.limiters = pyclaw.limiters.tvd.vanleer
elif solver_type=='sharpclaw':
solver = pyclaw.SharpClawSolver2D()
from clawpack import riemann
solver.rp = riemann.rp2_advection
solver.bc_lower[0] = pyclaw.BC.periodic
solver.bc_upper[0] = pyclaw.BC.periodic
solver.bc_lower[1] = pyclaw.BC.periodic
solver.bc_upper[1] = pyclaw.BC.periodic
solver.num_waves = 1
solver.cfl_max=1.0
solver.cfl_desired = 0.9
#===========================================================================
# Initialize domain, then initialize the solution associated to the domain and
# finally initialize aux array
#===========================================================================
# Domain:
mx=50; my=50
x = pyclaw.Dimension('x',0.0,1.0,mx)
y = pyclaw.Dimension('y',0.0,1.0,my)
domain = pyclaw.Domain([x,y])
num_eqn = 1
state = pyclaw.State(domain,num_eqn)
state.problem_data['u'] = 0.5 # Parameters (global auxiliary variables)
state.problem_data['v'] = 1.0
# Initial solution
# ================
qinit(state) # This function is defined above
#===========================================================================
# Set up controller and controller parameters
#===========================================================================
claw = pyclaw.Controller()
claw.tfinal = 2.0
claw.solution = pyclaw.Solution(state,domain)
claw.solver = solver
claw.outdir = outdir
#===========================================================================
# Solve the problem
#===========================================================================
status = claw.run()
#===========================================================================
# Plot results
#===========================================================================
if htmlplot: pyclaw.plot.html_plot(outdir=outdir)
if iplot: pyclaw.plot.interactive_plot(outdir=outdir) |
Python | def write(self,frame,path='./',file_format='ascii',file_prefix=None,
write_aux=False,options={},write_p=False):
r"""
Write out a representation of the solution
Writes out a suitable representation of this solution object based on
the format requested. The path is built from the optional path and
file_prefix arguments. Will raise an IOError if unsuccessful.
:Input:
- *frame* - (int) Frame number to append to the file output
- *path* - (string) Root path, will try and create the path if it
does not already exist. ``default = './'``
        - *file_format* - (string or list of strings) a string or list of
          strings containing the desired output formats. ``default = 'ascii'``
- *file_prefix* - (string) Prefix for the file name. Defaults to
the particular io modules default.
        - *write_aux* - (bool) Write the auxiliary array out as well if
present. ``default = False``
- *options* - (dict) Dictionary of optional arguments dependent on
which format is being used. ``default = {}``
"""
# Determine if we need to create the path
path = os.path.expandvars(os.path.expanduser(path))
if not os.path.exists(path):
try:
os.makedirs(path)
except OSError:
print "directory already exists, ignoring"
# Call the correct write function based on the output format
if isinstance(file_format,str):
format_list = [file_format]
        elif isinstance(file_format,list):
            format_list = file_format
        else:
            raise TypeError("file_format must be a string or a list of strings")
if 'petsc' in format_list:
from clawpack.petclaw import io
# Loop over list of formats requested
for form in format_list:
write_func = eval('io.write_%s' % form)
if file_prefix is None:
write_func(self,frame,path,write_aux=write_aux,
options=options,write_p=write_p)
else:
write_func(self,frame,path,file_prefix=file_prefix,
write_aux=write_aux,options=options,
write_p=write_p)
msg = "Wrote out solution in format %s for time t=%s" % (form,self.t)
logging.getLogger('io').info(msg) | def write(self,frame,path='./',file_format='ascii',file_prefix=None,
write_aux=False,options={},write_p=False):
r"""
Write out a representation of the solution
Writes out a suitable representation of this solution object based on
the format requested. The path is built from the optional path and
file_prefix arguments. Will raise an IOError if unsuccessful.
:Input:
- *frame* - (int) Frame number to append to the file output
- *path* - (string) Root path, will try and create the path if it
does not already exist. ``default = './'``
        - *file_format* - (string or list of strings) a string or list of
          strings containing the desired output formats. ``default = 'ascii'``
- *file_prefix* - (string) Prefix for the file name. Defaults to
the particular io modules default.
        - *write_aux* - (bool) Write the auxiliary array out as well if
present. ``default = False``
- *options* - (dict) Dictionary of optional arguments dependent on
which format is being used. ``default = {}``
"""
# Determine if we need to create the path
path = os.path.expandvars(os.path.expanduser(path))
if not os.path.exists(path):
try:
os.makedirs(path)
except OSError:
print "directory already exists, ignoring"
# Call the correct write function based on the output format
if isinstance(file_format,str):
format_list = [file_format]
        elif isinstance(file_format,list):
            format_list = file_format
        else:
            raise TypeError("file_format must be a string or a list of strings")
if 'petsc' in format_list:
from clawpack.petclaw import io
# Loop over list of formats requested
for form in format_list:
write_func = eval('io.write_%s' % form)
if file_prefix is None:
write_func(self,frame,path,write_aux=write_aux,
options=options,write_p=write_p)
else:
write_func(self,frame,path,file_prefix=file_prefix,
write_aux=write_aux,options=options,
write_p=write_p)
msg = "Wrote out solution in format %s for time t=%s" % (form,self.t)
logging.getLogger('io').info(msg) |
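A hedged usage sketch of the writer above; `sol` is assumed to be a populated pyclaw Solution and the output directory is hypothetical:

sol.write(0, path='./_output', file_format='ascii', write_aux=False)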
Python | def read(self,frame,path='./_output',file_format='ascii',file_prefix=None,
read_aux=True,options={}, **kargs):
r"""
Reads in a Solution object from a file
Reads in and initializes this Solution with the data specified. This
function will raise an IOError if it was unsuccessful.
        Any format must conform to the following call signature and return
True if the file has been successfully read into the given solution or
False otherwise. Options is a dictionary of parameters that each
format can specify. See the ascii module for an example.::
read_<format>(solution,path,frame,file_prefix,options={})
``<format>`` is the name of the format in question.
:Input:
- *frame* - (int) Frame number to be read in
- *path* - (string) Base path to the files to be read.
``default = './'``
        - *file_format* - (string) Format of the file; should match one of the
          modules inside the io package. ``default = 'ascii'``
- *file_prefix* - (string) Name prefix in front of all the files,
defaults to whatever the format defaults to, e.g. fort for ascii
- *options* - (dict) Dictionary of optional arguments dependent on
the format being read in. ``default = {}``
:Output:
- (bool) - True if read was successful, False otherwise
"""
if file_format=='petsc':
from clawpack.petclaw import io
path = os.path.expandvars(os.path.expanduser(path))
read_func = eval('io.read_%s' % file_format)
if file_prefix is None:
read_func(self,frame,path,read_aux=read_aux,options=options)
else:
read_func(self,frame,path,file_prefix=file_prefix,
read_aux=read_aux,options=options)
logging.getLogger('io').info("Read in solution for time t=%s" % self.t) | def read(self,frame,path='./_output',file_format='ascii',file_prefix=None,
read_aux=True,options={}, **kargs):
r"""
Reads in a Solution object from a file
Reads in and initializes this Solution with the data specified. This
function will raise an IOError if it was unsuccessful.
        Any format must conform to the following call signature and return
True if the file has been successfully read into the given solution or
False otherwise. Options is a dictionary of parameters that each
format can specify. See the ascii module for an example.::
read_<format>(solution,path,frame,file_prefix,options={})
``<format>`` is the name of the format in question.
:Input:
- *frame* - (int) Frame number to be read in
- *path* - (string) Base path to the files to be read.
``default = './'``
        - *file_format* - (string) Format of the file; should match one of the
          modules inside the io package. ``default = 'ascii'``
- *file_prefix* - (string) Name prefix in front of all the files,
defaults to whatever the format defaults to, e.g. fort for ascii
- *options* - (dict) Dictionary of optional arguments dependent on
the format being read in. ``default = {}``
:Output:
- (bool) - True if read was successful, False otherwise
"""
if file_format=='petsc':
from clawpack.petclaw import io
path = os.path.expandvars(os.path.expanduser(path))
read_func = eval('io.read_%s' % file_format)
if file_prefix is None:
read_func(self,frame,path,read_aux=read_aux,options=options)
else:
read_func(self,frame,path,file_prefix=file_prefix,
read_aux=read_aux,options=options)
logging.getLogger('io').info("Read in solution for time t=%s" % self.t) |
Python | def is_valid(self):
r"""
Checks that all required solver attributes are set.
Checks to make sure that all the required attributes for the solver
have been set correctly. All required attributes that need to be set
are contained in the attributes list of the class.
Will post debug level logging message of which required attributes
have not been set.
:Output:
- *valid* - (bool) True if the solver is valid, False otherwise
"""
valid = True
if any([bcmeth == BC.custom for bcmeth in self.bc_lower]):
if self.user_bc_lower is None:
self.logger.debug('Lower custom BC function has not been set.')
valid = False
if any([bcmeth == BC.custom for bcmeth in self.bc_upper]):
if self.user_bc_upper is None:
self.logger.debug('Upper custom BC function has not been set.')
valid = False
return valid | def is_valid(self):
r"""
Checks that all required solver attributes are set.
Checks to make sure that all the required attributes for the solver
have been set correctly. All required attributes that need to be set
are contained in the attributes list of the class.
Will post debug level logging message of which required attributes
have not been set.
:Output:
- *valid* - (bool) True if the solver is valid, False otherwise
"""
valid = True
if any([bcmeth == BC.custom for bcmeth in self.bc_lower]):
if self.user_bc_lower is None:
self.logger.debug('Lower custom BC function has not been set.')
valid = False
if any([bcmeth == BC.custom for bcmeth in self.bc_upper]):
if self.user_bc_upper is None:
self.logger.debug('Upper custom BC function has not been set.')
valid = False
return valid |
Python | def allocate_bc_arrays(self,state):
r"""
Create numpy arrays for q and aux with ghost cells attached.
These arrays are referred to throughout the code as qbc and auxbc.
This is typically called by solver.setup().
"""
import numpy as np
qbc_dim = [n+2*self.num_ghost for n in state.grid.num_cells]
qbc_dim.insert(0,state.num_eqn)
self.qbc = np.zeros(qbc_dim,order='F')
auxbc_dim = [n+2*self.num_ghost for n in state.grid.num_cells]
auxbc_dim.insert(0,state.num_aux)
self.auxbc = np.empty(auxbc_dim,order='F')
if state.num_aux>0:
self.apply_aux_bcs(state) | def allocate_bc_arrays(self,state):
r"""
Create numpy arrays for q and aux with ghost cells attached.
These arrays are referred to throughout the code as qbc and auxbc.
This is typically called by solver.setup().
"""
import numpy as np
qbc_dim = [n+2*self.num_ghost for n in state.grid.num_cells]
qbc_dim.insert(0,state.num_eqn)
self.qbc = np.zeros(qbc_dim,order='F')
auxbc_dim = [n+2*self.num_ghost for n in state.grid.num_cells]
auxbc_dim.insert(0,state.num_aux)
self.auxbc = np.empty(auxbc_dim,order='F')
if state.num_aux>0:
self.apply_aux_bcs(state) |
Python | def apply_q_bcs(self,state):
r"""
Fills in solver.qbc (the local vector), including ghost cell values.
        The width of the ghost-cell layer is determined by the
        :attr:`num_ghost` attribute. The type of boundary condition set is
determined by :attr:`bc_lower` and :attr:`bc_upper` for the
        appropriate dimension. Valid values for :attr:`bc_lower` and
:attr:`bc_upper` include:
- 'custom' or 0: A user defined boundary condition will be used, the appropriate
Dimension method user_bc_lower or user_bc_upper will be called.
- 'extrap' or 1: Zero-order extrapolation.
- 'periodic' or 2: Periodic boundary conditions.
- 'wall' or 3: Wall boundary conditions. It is assumed that the second
component of q represents velocity or momentum.
:Input:
- *grid* - (:class:`Patch`) The grid being operated on.
- *state* - The state being operated on; this may or may not be the
same as *grid*. Generally it is the same as *grid* for
the classic algorithms and other one-level algorithms,
but different for method-of-lines algorithms like SharpClaw.
:Output:
- (ndarray(num_eqn,...)) q array with boundary ghost cells added and set
.. note::
Note that for user-defined boundary conditions, the array sent to
the boundary condition has not been rolled.
"""
import numpy as np
self.qbc = state.get_qbc_from_q(self.num_ghost,self.qbc)
grid = state.grid
for idim,dim in enumerate(grid.dimensions):
# First check if we are actually on the boundary
# (in case of a parallel run)
if state.grid.lower[idim] == state.patch.lower_global[idim]:
# If a user defined boundary condition is being used, send it on,
# otherwise roll the axis to front position and operate on it
if self.bc_lower[idim] == BC.custom:
self.qbc_lower(state,dim,state.t,self.qbc,idim)
elif self.bc_lower[idim] == BC.periodic:
if state.grid.upper[idim] == state.patch.upper_global[idim]:
# This process owns the whole domain
self.qbc_lower(state,dim,state.t,np.rollaxis(self.qbc,idim+1,1),idim)
else:
pass #Handled automatically by PETSc
else:
self.qbc_lower(state,dim,state.t,np.rollaxis(self.qbc,idim+1,1),idim)
if state.grid.upper[idim] == state.patch.upper_global[idim]:
if self.bc_upper[idim] == BC.custom:
self.qbc_upper(state,dim,state.t,self.qbc,idim)
elif self.bc_upper[idim] == BC.periodic:
if state.grid.lower[idim] == state.patch.lower_global[idim]:
# This process owns the whole domain
self.qbc_upper(state,dim,state.t,np.rollaxis(self.qbc,idim+1,1),idim)
else:
pass #Handled automatically by PETSc
else:
self.qbc_upper(state,dim,state.t,np.rollaxis(self.qbc,idim+1,1),idim) | def apply_q_bcs(self,state):
r"""
Fills in solver.qbc (the local vector), including ghost cell values.
        The width of the ghost-cell layer is determined by the
        :attr:`num_ghost` attribute. The type of boundary condition set is
determined by :attr:`bc_lower` and :attr:`bc_upper` for the
        appropriate dimension. Valid values for :attr:`bc_lower` and
:attr:`bc_upper` include:
- 'custom' or 0: A user defined boundary condition will be used, the appropriate
Dimension method user_bc_lower or user_bc_upper will be called.
- 'extrap' or 1: Zero-order extrapolation.
- 'periodic' or 2: Periodic boundary conditions.
- 'wall' or 3: Wall boundary conditions. It is assumed that the second
component of q represents velocity or momentum.
:Input:
- *grid* - (:class:`Patch`) The grid being operated on.
- *state* - The state being operated on; this may or may not be the
same as *grid*. Generally it is the same as *grid* for
the classic algorithms and other one-level algorithms,
but different for method-of-lines algorithms like SharpClaw.
:Output:
- (ndarray(num_eqn,...)) q array with boundary ghost cells added and set
.. note::
Note that for user-defined boundary conditions, the array sent to
the boundary condition has not been rolled.
"""
import numpy as np
self.qbc = state.get_qbc_from_q(self.num_ghost,self.qbc)
grid = state.grid
for idim,dim in enumerate(grid.dimensions):
# First check if we are actually on the boundary
# (in case of a parallel run)
if state.grid.lower[idim] == state.patch.lower_global[idim]:
# If a user defined boundary condition is being used, send it on,
# otherwise roll the axis to front position and operate on it
if self.bc_lower[idim] == BC.custom:
self.qbc_lower(state,dim,state.t,self.qbc,idim)
elif self.bc_lower[idim] == BC.periodic:
if state.grid.upper[idim] == state.patch.upper_global[idim]:
# This process owns the whole domain
self.qbc_lower(state,dim,state.t,np.rollaxis(self.qbc,idim+1,1),idim)
else:
pass #Handled automatically by PETSc
else:
self.qbc_lower(state,dim,state.t,np.rollaxis(self.qbc,idim+1,1),idim)
if state.grid.upper[idim] == state.patch.upper_global[idim]:
if self.bc_upper[idim] == BC.custom:
self.qbc_upper(state,dim,state.t,self.qbc,idim)
elif self.bc_upper[idim] == BC.periodic:
if state.grid.lower[idim] == state.patch.lower_global[idim]:
# This process owns the whole domain
self.qbc_upper(state,dim,state.t,np.rollaxis(self.qbc,idim+1,1),idim)
else:
pass #Handled automatically by PETSc
else:
self.qbc_upper(state,dim,state.t,np.rollaxis(self.qbc,idim+1,1),idim) |
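A minimal usage sketch of the dispatch above, assuming clawpack is installed and a 1D problem; the handler name and inflow value are illustrative:

from clawpack import pyclaw

def inflow_bc(state, dim, t, qbc, num_ghost):
    # Custom lower BC handler, matching the user_bc_lower call signature
    # above; in 1D the qbc array has not been rolled.
    qbc[:, :num_ghost] = 1.0  # illustrative constant inflow state

solver = pyclaw.ClawSolver1D()
solver.bc_lower[0] = pyclaw.BC.custom  # 0: dispatch to user_bc_lower
solver.bc_upper[0] = pyclaw.BC.wall    # 3: reflecting wall
solver.user_bc_lower = inflow_bc
# apply_q_bcs(state) then fills solver.qbc before each time step.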
Python | def qbc_lower(self,state,dim,t,qbc,idim):
r"""
Apply lower boundary conditions to qbc
Sets the lower coordinate's ghost cells of *qbc* depending on what
:attr:`bc_lower` is. If :attr:`bc_lower` = 0 then the user
boundary condition specified by :attr:`user_bc_lower` is used. Note
that in this case the function :attr:`user_bc_lower` belongs only to
this dimension but :attr:`user_bc_lower` could set all user boundary
conditions at once with the appropriate calling sequence.
:Input:
- *patch* - (:class:`Patch`) Patch that the dimension belongs to
        :Input/Output:
         - *qbc* - (ndarray(num_eqn,...)) Array with added ghost cells which
           will be set in this routine
"""
if self.bc_lower[idim] == BC.custom:
self.user_bc_lower(state,dim,t,qbc,self.num_ghost)
elif self.bc_lower[idim] == BC.extrap:
for i in xrange(self.num_ghost):
qbc[:,i,...] = qbc[:,self.num_ghost,...]
elif self.bc_lower[idim] == BC.periodic:
# This process owns the whole patch
qbc[:,:self.num_ghost,...] = qbc[:,-2*self.num_ghost:-self.num_ghost,...]
elif self.bc_lower[idim] == BC.wall:
for i in xrange(self.num_ghost):
qbc[:,i,...] = qbc[:,2*self.num_ghost-1-i,...]
qbc[idim+1,i,...] = -qbc[idim+1,2*self.num_ghost-1-i,...] # Negate normal velocity
else:
            raise NotImplementedError("Boundary condition %s not implemented" % self.bc_lower)
r"""
Apply lower boundary conditions to qbc
Sets the lower coordinate's ghost cells of *qbc* depending on what
:attr:`bc_lower` is. If :attr:`bc_lower` = 0 then the user
boundary condition specified by :attr:`user_bc_lower` is used. Note
that in this case the function :attr:`user_bc_lower` belongs only to
this dimension but :attr:`user_bc_lower` could set all user boundary
conditions at once with the appropriate calling sequence.
:Input:
- *patch* - (:class:`Patch`) Patch that the dimension belongs to
:Input/Ouput:
- *qbc* - (ndarray(...,num_eqn)) Array with added ghost cells which will
be set in this routines
"""
if self.bc_lower[idim] == BC.custom:
self.user_bc_lower(state,dim,t,qbc,self.num_ghost)
elif self.bc_lower[idim] == BC.extrap:
for i in xrange(self.num_ghost):
qbc[:,i,...] = qbc[:,self.num_ghost,...]
elif self.bc_lower[idim] == BC.periodic:
# This process owns the whole patch
qbc[:,:self.num_ghost,...] = qbc[:,-2*self.num_ghost:-self.num_ghost,...]
elif self.bc_lower[idim] == BC.wall:
for i in xrange(self.num_ghost):
qbc[:,i,...] = qbc[:,2*self.num_ghost-1-i,...]
qbc[idim+1,i,...] = -qbc[idim+1,2*self.num_ghost-1-i,...] # Negate normal velocity
else:
raise NotImplementedError("Boundary condition %s not implemented" % self.bc_lower) |
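The lower-edge index arithmetic can be checked in isolation; a small sketch with num_ghost = 2 and the leading equation axis dropped for brevity:

import numpy as np

num_ghost = 2
qbc = np.empty(6 + 2*num_ghost)
qbc[num_ghost:-num_ghost] = np.arange(1., 7.)  # interior cells 1..6
# 'extrap': copy the first interior cell outward
for i in xrange(num_ghost):
    qbc[i] = qbc[num_ghost]
# -> qbc[:2] is [1., 1.]
# 'wall': mirror the first interior cells (the sign flip applied to the
# normal-velocity component of q is not shown for this scalar example)
for i in xrange(num_ghost):
    qbc[i] = qbc[2*num_ghost - 1 - i]
# -> qbc[:2] is [2., 1.]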
Python | def qbc_upper(self,state,dim,t,qbc,idim):
r"""
Apply upper boundary conditions to qbc
Sets the upper coordinate's ghost cells of *qbc* depending on what
:attr:`bc_upper` is. If :attr:`bc_upper` = 0 then the user
boundary condition specified by :attr:`user_bc_upper` is used. Note
that in this case the function :attr:`user_bc_upper` belongs only to
this dimension but :attr:`user_bc_upper` could set all user boundary
conditions at once with the appropriate calling sequence.
:Input:
- *patch* - (:class:`Patch`) Patch that the dimension belongs to
        :Input/Output:
         - *qbc* - (ndarray(num_eqn,...)) Array with added ghost cells which
           will be set in this routine
"""
if self.bc_upper[idim] == BC.custom:
self.user_bc_upper(state,dim,t,qbc,self.num_ghost)
elif self.bc_upper[idim] == BC.extrap:
for i in xrange(self.num_ghost):
qbc[:,-i-1,...] = qbc[:,-self.num_ghost-1,...]
elif self.bc_upper[idim] == BC.periodic:
# This process owns the whole patch
qbc[:,-self.num_ghost:,...] = qbc[:,self.num_ghost:2*self.num_ghost,...]
elif self.bc_upper[idim] == BC.wall:
for i in xrange(self.num_ghost):
qbc[:,-i-1,...] = qbc[:,-2*self.num_ghost+i,...]
qbc[idim+1,-i-1,...] = -qbc[idim+1,-2*self.num_ghost+i,...] # Negate normal velocity
else:
            raise NotImplementedError("Boundary condition %s not implemented" % self.bc_upper)
r"""
Apply upper boundary conditions to qbc
Sets the upper coordinate's ghost cells of *qbc* depending on what
:attr:`bc_upper` is. If :attr:`bc_upper` = 0 then the user
boundary condition specified by :attr:`user_bc_upper` is used. Note
that in this case the function :attr:`user_bc_upper` belongs only to
this dimension but :attr:`user_bc_upper` could set all user boundary
conditions at once with the appropriate calling sequence.
:Input:
- *patch* - (:class:`Patch`) Patch that the dimension belongs to
:Input/Ouput:
- *qbc* - (ndarray(...,num_eqn)) Array with added ghost cells which will
be set in this routines
"""
if self.bc_upper[idim] == BC.custom:
self.user_bc_upper(state,dim,t,qbc,self.num_ghost)
elif self.bc_upper[idim] == BC.extrap:
for i in xrange(self.num_ghost):
qbc[:,-i-1,...] = qbc[:,-self.num_ghost-1,...]
elif self.bc_upper[idim] == BC.periodic:
# This process owns the whole patch
qbc[:,-self.num_ghost:,...] = qbc[:,self.num_ghost:2*self.num_ghost,...]
elif self.bc_upper[idim] == BC.wall:
for i in xrange(self.num_ghost):
qbc[:,-i-1,...] = qbc[:,-2*self.num_ghost+i,...]
qbc[idim+1,-i-1,...] = -qbc[idim+1,-2*self.num_ghost+i,...] # Negate normal velocity
else:
raise NotImplementedError("Boundary condition %s not implemented" % self.bc_lower) |
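The upper edge uses negative indices, the easiest place for an off-by-one error; the same toy check with num_ghost = 2:

import numpy as np

num_ghost = 2
qbc = np.empty(6 + 2*num_ghost)
qbc[num_ghost:-num_ghost] = np.arange(1., 7.)  # interior cells 1..6
# 'extrap': copy the last interior cell outward
for i in xrange(num_ghost):
    qbc[-i - 1] = qbc[-num_ghost - 1]
# -> qbc[-2:] is [6., 6.]
# 'wall': mirror the last interior cells
for i in xrange(num_ghost):
    qbc[-i - 1] = qbc[-2*num_ghost + i]
# -> qbc[-2:] is [6., 5.]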
Python | def apply_aux_bcs(self,state):
r"""
Appends boundary cells to aux and fills them with appropriate values.
This function returns an array of dimension determined by the
:attr:`num_ghost` attribute. The type of boundary condition set is
determined by :attr:`aux_bc_lower` and :attr:`aux_bc_upper` for the
appropriate dimension. Valid values for :attr:`aux_bc_lower` and
:attr:`aux_bc_upper` include:
        - 'custom' or 0: A user defined boundary condition will be used; the
          user-supplied routine user_aux_bc_lower or user_aux_bc_upper will be called.
- 'extrap' or 1: Zero-order extrapolation.
- 'periodic' or 2: Periodic boundary conditions.
        - 'wall' or 3: Wall boundary conditions. Aux values are reflected
          into the ghost cells without any sign change.
        :Input:
         - *state* - (:class:`State`) The state being operated on; this may
           be the solution state itself (as in the classic algorithms and
           other one-level algorithms) or an intermediate stage (as in
           method-of-lines algorithms like SharpClaw).
:Output:
         - (ndarray(num_aux,...)) aux array with boundary ghost cells added and set
.. note::
Note that for user-defined boundary conditions, the array sent to
the boundary condition has not been rolled.
"""
import numpy as np
self.auxbc = state.get_auxbc_from_aux(self.num_ghost,self.auxbc)
patch = state.patch
for idim,dim in enumerate(patch.dimensions):
# First check if we are actually on the boundary
# (in case of a parallel run)
if state.grid.lower[idim] == state.patch.lower_global[idim]:
# If a user defined boundary condition is being used, send it on,
# otherwise roll the axis to front position and operate on it
if self.aux_bc_lower[idim] == BC.custom:
self.auxbc_lower(state,dim,state.t,self.auxbc,idim)
elif self.aux_bc_lower[idim] == BC.periodic:
if state.grid.upper[idim] == state.patch.upper_global[idim]:
# This process owns the whole patch
self.auxbc_lower(state,dim,state.t,np.rollaxis(self.auxbc,idim+1,1),idim)
else:
pass #Handled automatically by PETSc
else:
self.auxbc_lower(state,dim,state.t,np.rollaxis(self.auxbc,idim+1,1),idim)
if state.grid.upper[idim] == state.patch.upper_global[idim]:
if self.aux_bc_upper[idim] == BC.custom:
self.auxbc_upper(state,dim,state.t,self.auxbc,idim)
elif self.aux_bc_upper[idim] == BC.periodic:
if state.grid.lower[idim] == state.patch.lower_global[idim]:
# This process owns the whole patch
self.auxbc_upper(state,dim,state.t,np.rollaxis(self.auxbc,idim+1,1),idim)
else:
pass #Handled automatically by PETSc
else:
                    self.auxbc_upper(state,dim,state.t,np.rollaxis(self.auxbc,idim+1,1),idim)
r"""
Appends boundary cells to aux and fills them with appropriate values.
This function returns an array of dimension determined by the
:attr:`num_ghost` attribute. The type of boundary condition set is
determined by :attr:`aux_bc_lower` and :attr:`aux_bc_upper` for the
approprate dimension. Valid values for :attr:`aux_bc_lower` and
:attr:`aux_bc_upper` include:
- 'custom' or 0: A user defined boundary condition will be used, the appropriate
Dimension method user_aux_bc_lower or user_aux_bc_upper will be called.
- 'extrap' or 1: Zero-order extrapolation.
- 'periodic' or 2: Periodic boundary conditions.
- 'wall' or 3: Wall boundary conditions. It is assumed that the second
component of q represents velocity or momentum.
:Input:
- *patch* - (:class:`Patch`) The patch being operated on.
- *state* - The state being operated on; this may or may not be the
same as *patch*. Generally it is the same as *patch* for
the classic algorithms and other one-level algorithms,
but different for method-of-lines algorithms like SharpClaw.
:Output:
- (ndarray(num_aux,...)) q array with boundary ghost cells added and set
.. note::
Note that for user-defined boundary conditions, the array sent to
the boundary condition has not been rolled.
"""
import numpy as np
self.auxbc = state.get_auxbc_from_aux(self.num_ghost,self.auxbc)
patch = state.patch
for idim,dim in enumerate(patch.dimensions):
# First check if we are actually on the boundary
# (in case of a parallel run)
if state.grid.lower[idim] == state.patch.lower_global[idim]:
# If a user defined boundary condition is being used, send it on,
# otherwise roll the axis to front position and operate on it
if self.aux_bc_lower[idim] == BC.custom:
self.auxbc_lower(state,dim,state.t,self.auxbc,idim)
elif self.aux_bc_lower[idim] == BC.periodic:
if state.grid.upper[idim] == state.patch.upper_global[idim]:
# This process owns the whole patch
self.auxbc_lower(state,dim,state.t,np.rollaxis(self.auxbc,idim+1,1),idim)
else:
pass #Handled automatically by PETSc
else:
self.auxbc_lower(state,dim,state.t,np.rollaxis(self.auxbc,idim+1,1),idim)
if state.grid.upper[idim] == state.patch.upper_global[idim]:
if self.aux_bc_upper[idim] == BC.custom:
self.auxbc_upper(state,dim,state.t,self.auxbc,idim)
elif self.aux_bc_upper[idim] == BC.periodic:
if state.grid.lower[idim] == state.patch.lower_global[idim]:
# This process owns the whole patch
self.auxbc_upper(state,dim,state.t,np.rollaxis(self.auxbc,idim+1,1),idim)
else:
pass #Handled automatically by PETSc
else:
self.auxbc_upper(state,dim,state.t,np.rollaxis(self.auxbc,idim+1,1),idim) |
Python | def auxbc_lower(self,state,dim,t,auxbc,idim):
r"""
Apply lower boundary conditions to auxbc
Sets the lower coordinate's ghost cells of *auxbc* depending on what
:attr:`aux_bc_lower` is. If :attr:`aux_bc_lower` = 0 then the user
boundary condition specified by :attr:`user_aux_bc_lower` is used. Note
that in this case the function :attr:`user_aux_bc_lower` belongs only to
this dimension but :attr:`user_aux_bc_lower` could set all user boundary
conditions at once with the appropriate calling sequence.
:Input:
- *patch* - (:class:`Patch`) Patch that the dimension belongs to
        :Input/Output:
         - *auxbc* - (ndarray(num_aux,...)) Array with added ghost cells which
           will be set in this routine
"""
if self.aux_bc_lower[idim] == BC.custom:
self.user_aux_bc_lower(state,dim,t,auxbc,self.num_ghost)
elif self.aux_bc_lower[idim] == BC.extrap:
for i in xrange(self.num_ghost):
auxbc[:,i,...] = auxbc[:,self.num_ghost,...]
elif self.aux_bc_lower[idim] == BC.periodic:
# This process owns the whole patch
auxbc[:,:self.num_ghost,...] = auxbc[:,-2*self.num_ghost:-self.num_ghost,...]
elif self.aux_bc_lower[idim] == BC.wall:
for i in xrange(self.num_ghost):
auxbc[:,i,...] = auxbc[:,2*self.num_ghost-1-i,...]
elif self.aux_bc_lower[idim] is None:
raise Exception("One or more of the aux boundary conditions aux_bc_upper has not been specified.")
else:
            raise NotImplementedError("Boundary condition %s not implemented" % self.aux_bc_lower)
r"""
Apply lower boundary conditions to auxbc
Sets the lower coordinate's ghost cells of *auxbc* depending on what
:attr:`aux_bc_lower` is. If :attr:`aux_bc_lower` = 0 then the user
boundary condition specified by :attr:`user_aux_bc_lower` is used. Note
that in this case the function :attr:`user_aux_bc_lower` belongs only to
this dimension but :attr:`user_aux_bc_lower` could set all user boundary
conditions at once with the appropriate calling sequence.
:Input:
- *patch* - (:class:`Patch`) Patch that the dimension belongs to
:Input/Ouput:
- *auxbc* - (ndarray(num_aux,...)) Array with added ghost cells which will
be set in this routines
"""
if self.aux_bc_lower[idim] == BC.custom:
self.user_aux_bc_lower(state,dim,t,auxbc,self.num_ghost)
elif self.aux_bc_lower[idim] == BC.extrap:
for i in xrange(self.num_ghost):
auxbc[:,i,...] = auxbc[:,self.num_ghost,...]
elif self.aux_bc_lower[idim] == BC.periodic:
# This process owns the whole patch
auxbc[:,:self.num_ghost,...] = auxbc[:,-2*self.num_ghost:-self.num_ghost,...]
elif self.aux_bc_lower[idim] == BC.wall:
for i in xrange(self.num_ghost):
auxbc[:,i,...] = auxbc[:,2*self.num_ghost-1-i,...]
elif self.aux_bc_lower[idim] is None:
raise Exception("One or more of the aux boundary conditions aux_bc_upper has not been specified.")
else:
raise NotImplementedError("Boundary condition %s not implemented" % self.aux_bc_lower) |
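The one behavioral difference from qbc_lower is the wall case: aux values are mirrored with no sign flip, since aux holds material properties rather than momenta. Same toy conventions as above:

import numpy as np

num_ghost = 2
auxbc = np.empty(6 + 2*num_ghost)
auxbc[num_ghost:-num_ghost] = np.arange(1., 7.)
for i in xrange(num_ghost):
    auxbc[i] = auxbc[2*num_ghost - 1 - i]  # plain mirror, no negation
# -> auxbc[:2] is [2., 1.]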
Python | def auxbc_upper(self,state,dim,t,auxbc,idim):
r"""
Apply upper boundary conditions to auxbc
Sets the upper coordinate's ghost cells of *auxbc* depending on what
:attr:`aux_bc_upper` is. If :attr:`aux_bc_upper` = 0 then the user
boundary condition specified by :attr:`user_aux_bc_upper` is used. Note
that in this case the function :attr:`user_aux_bc_upper` belongs only to
this dimension but :attr:`user_aux_bc_upper` could set all user boundary
conditions at once with the appropriate calling sequence.
:Input:
- *patch* - (:class:`Patch`) Patch that the dimension belongs to
        :Input/Output:
         - *auxbc* - (ndarray(num_aux,...)) Array with added ghost cells which
           will be set in this routine
"""
if self.aux_bc_upper[idim] == BC.custom:
self.user_aux_bc_upper(state,dim,t,auxbc,self.num_ghost)
elif self.aux_bc_upper[idim] == BC.extrap:
for i in xrange(self.num_ghost):
auxbc[:,-i-1,...] = auxbc[:,-self.num_ghost-1,...]
elif self.aux_bc_upper[idim] == BC.periodic:
# This process owns the whole patch
auxbc[:,-self.num_ghost:,...] = auxbc[:,self.num_ghost:2*self.num_ghost,...]
elif self.aux_bc_upper[idim] == BC.wall:
for i in xrange(self.num_ghost):
auxbc[:,-i-1,...] = auxbc[:,-2*self.num_ghost+i,...]
        elif self.aux_bc_upper[idim] is None:
            raise Exception("One or more of the aux boundary conditions aux_bc_upper has not been specified.")
else:
            raise NotImplementedError("Boundary condition %s not implemented" % self.aux_bc_upper)
r"""
Apply upper boundary conditions to auxbc
Sets the upper coordinate's ghost cells of *auxbc* depending on what
:attr:`aux_bc_upper` is. If :attr:`aux_bc_upper` = 0 then the user
boundary condition specified by :attr:`user_aux_bc_upper` is used. Note
that in this case the function :attr:`user_aux_bc_upper` belongs only to
this dimension but :attr:`user_aux_bc_upper` could set all user boundary
conditions at once with the appropriate calling sequence.
:Input:
- *patch* - (:class:`Patch`) Patch that the dimension belongs to
:Input/Ouput:
- *auxbc* - (ndarray(num_aux,...)) Array with added ghost cells which will
be set in this routines
"""
if self.aux_bc_upper[idim] == BC.custom:
self.user_aux_bc_upper(state,dim,t,auxbc,self.num_ghost)
elif self.aux_bc_upper[idim] == BC.extrap:
for i in xrange(self.num_ghost):
auxbc[:,-i-1,...] = auxbc[:,-self.num_ghost-1,...]
elif self.aux_bc_upper[idim] == BC.periodic:
# This process owns the whole patch
auxbc[:,-self.num_ghost:,...] = auxbc[:,self.num_ghost:2*self.num_ghost,...]
elif self.aux_bc_upper[idim] == BC.wall:
for i in xrange(self.num_ghost):
auxbc[:,-i-1,...] = auxbc[:,-2*self.num_ghost+i,...]
elif self.aux_bc_lower[idim] is None:
raise Exception("One or more of the aux boundary conditions aux_bc_lower has not been specified.")
else:
raise NotImplementedError("Boundary condition %s not implemented" % self.aux_bc_lower) |
Python | def evolve_to_time(self,solution,tend=None):
r"""
Evolve solution from solution.t to tend. If tend is not specified,
take a single step.
This method contains the machinery to evolve the solution object in
``solution`` to the requested end time tend if given, or one
step if not.
:Input:
- *solution* - (:class:`Solution`) Solution to be evolved
- *tend* - (float) The end time to evolve to, if not provided then
the method will take a single time step.
:Output:
- (dict) - Returns the status dictionary of the solver
"""
if not self._is_set_up:
self.setup(solution)
        take_one_step = (tend is None)
# Parameters for time-stepping
tstart = solution.t
# Reset status dictionary
self.status['cflmax'] = self.cfl.get_cached_max()
self.status['dtmin'] = self.dt
self.status['dtmax'] = self.dt
self.status['numsteps'] = 0
# Setup for the run
if not self.dt_variable:
if take_one_step:
self.max_steps = 1
else:
self.max_steps = int((tend - tstart + 1e-10) / self.dt)
if abs(self.max_steps*self.dt - (tend - tstart)) > 1e-5 * (tend-tstart):
raise Exception('dt does not divide (tend-tstart) and dt is fixed!')
        if self.dt_variable and self.cfl_desired > self.cfl_max:
raise Exception('Variable time-stepping and desired CFL > maximum CFL')
if tend <= tstart and not take_one_step:
self.logger.info("Already at or beyond end time: no evolution required.")
self.max_steps = 0
# Main time-stepping loop
for n in xrange(self.max_steps):
state = solution.state
# Adjust dt so that we hit tend exactly if we are near tend
if not take_one_step:
if solution.t + self.dt > tend and tstart < tend:
self.dt = tend - solution.t
if tend - solution.t - self.dt < 1.e-14:
self.dt = tend - solution.t
# Keep a backup in case we need to retake a time step
if self.dt_variable:
q_backup = state.q.copy('F')
told = solution.t
self.step(solution)
# Check to make sure that the Courant number was not too large
cfl = self.cfl.get_cached_max()
if cfl <= self.cfl_max:
# Accept this step
self.status['cflmax'] = max(cfl, self.status['cflmax'])
                if self.dt_variable:
solution.t += self.dt
else:
#Avoid roundoff error if dt_variable=False:
solution.t = tstart+(n+1)*self.dt
# Verbose messaging
self.logger.debug("Step %i CFL = %f dt = %f t = %f"
% (n,cfl,self.dt,solution.t))
self.write_gauge_values(solution)
# Increment number of time steps completed
self.status['numsteps'] += 1
else:
# Reject this step
self.logger.debug("Rejecting time step, CFL number too large")
if self.dt_variable:
state.q = q_backup
solution.t = told
else:
# Give up, we cannot adapt, abort
self.status['cflmax'] = \
max(cfl, self.status['cflmax'])
raise Exception('CFL too large, giving up!')
# Choose new time step
if self.dt_variable:
if cfl > 0.0:
self.dt = min(self.dt_max,self.dt * self.cfl_desired
/ cfl)
self.status['dtmin'] = min(self.dt, self.status['dtmin'])
self.status['dtmax'] = max(self.dt, self.status['dtmax'])
else:
self.dt = self.dt_max
# See if we are finished yet
if (solution.t >= tend and not take_one_step) or (take_one_step and self.status['numsteps'] > 0):
break
# End of main time-stepping loop -------------------------------------
if self.dt_variable and solution.t < tend \
and self.status['numsteps'] == self.max_steps:
raise Exception("Maximum number of timesteps have been taken")
        return self.status
r"""
Evolve solution from solution.t to tend. If tend is not specified,
take a single step.
This method contains the machinery to evolve the solution object in
``solution`` to the requested end time tend if given, or one
step if not.
:Input:
- *solution* - (:class:`Solution`) Solution to be evolved
- *tend* - (float) The end time to evolve to, if not provided then
the method will take a single time step.
:Output:
- (dict) - Returns the status dictionary of the solver
"""
if not self._is_set_up:
self.setup(solution)
if tend == None:
take_one_step = True
else:
take_one_step = False
# Parameters for time-stepping
tstart = solution.t
# Reset status dictionary
self.status['cflmax'] = self.cfl.get_cached_max()
self.status['dtmin'] = self.dt
self.status['dtmax'] = self.dt
self.status['numsteps'] = 0
# Setup for the run
if not self.dt_variable:
if take_one_step:
self.max_steps = 1
else:
self.max_steps = int((tend - tstart + 1e-10) / self.dt)
if abs(self.max_steps*self.dt - (tend - tstart)) > 1e-5 * (tend-tstart):
raise Exception('dt does not divide (tend-tstart) and dt is fixed!')
if self.dt_variable == 1 and self.cfl_desired > self.cfl_max:
raise Exception('Variable time-stepping and desired CFL > maximum CFL')
if tend <= tstart and not take_one_step:
self.logger.info("Already at or beyond end time: no evolution required.")
self.max_steps = 0
# Main time-stepping loop
for n in xrange(self.max_steps):
state = solution.state
# Adjust dt so that we hit tend exactly if we are near tend
if not take_one_step:
if solution.t + self.dt > tend and tstart < tend:
self.dt = tend - solution.t
if tend - solution.t - self.dt < 1.e-14:
self.dt = tend - solution.t
# Keep a backup in case we need to retake a time step
if self.dt_variable:
q_backup = state.q.copy('F')
told = solution.t
self.step(solution)
# Check to make sure that the Courant number was not too large
cfl = self.cfl.get_cached_max()
if cfl <= self.cfl_max:
# Accept this step
self.status['cflmax'] = max(cfl, self.status['cflmax'])
if self.dt_variable==True:
solution.t += self.dt
else:
#Avoid roundoff error if dt_variable=False:
solution.t = tstart+(n+1)*self.dt
# Verbose messaging
self.logger.debug("Step %i CFL = %f dt = %f t = %f"
% (n,cfl,self.dt,solution.t))
self.write_gauge_values(solution)
# Increment number of time steps completed
self.status['numsteps'] += 1
else:
# Reject this step
self.logger.debug("Rejecting time step, CFL number too large")
if self.dt_variable:
state.q = q_backup
solution.t = told
else:
# Give up, we cannot adapt, abort
self.status['cflmax'] = \
max(cfl, self.status['cflmax'])
raise Exception('CFL too large, giving up!')
# Choose new time step
if self.dt_variable:
if cfl > 0.0:
self.dt = min(self.dt_max,self.dt * self.cfl_desired
/ cfl)
self.status['dtmin'] = min(self.dt, self.status['dtmin'])
self.status['dtmax'] = max(self.dt, self.status['dtmax'])
else:
self.dt = self.dt_max
# See if we are finished yet
if (solution.t >= tend and not take_one_step) or (take_one_step and self.status['numsteps'] > 0):
break
# End of main time-stepping loop -------------------------------------
if self.dt_variable and solution.t < tend \
and self.status['numsteps'] == self.max_steps:
raise Exception("Maximum number of timesteps have been taken")
return self.status |
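A sketch of the typical calling pattern, stepping to a sequence of output times; the Controller class normally drives this loop, and the names below are illustrative:

import numpy as np

# Assumes `solver` has been set up and `solution` holds the initial data.
for tout in np.linspace(0.1, 1.0, 10):
    status = solver.evolve_to_time(solution, tout)
    print 'reached t = %s in %d steps (max CFL %.3f)' \
        % (solution.t, status['numsteps'], status['cflmax'])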
Python | def step(self,solution):
r"""
Take one step
This method is only a stub and should be overridden by all solvers who
would like to use the default time-stepping in evolve_to_time.
"""
raise NotImplementedError("No stepping routine has been defined!") | def step(self,solution):
r"""
Take one step
This method is only a stub and should be overridden by all solvers who
would like to use the default time-stepping in evolve_to_time.
"""
raise NotImplementedError("No stepping routine has been defined!") |
Python | def write_gauge_values(self,solution):
r"""Write solution (or derived quantity) values at each gauge coordinate
to file.
"""
for i,gauge in enumerate(solution.state.grid.gauges):
            if self.num_dim == 1:
                ix = gauge[0]
                aux = solution.state.aux[:,ix]
                q = solution.state.q[:,ix]
            elif self.num_dim == 2:
                ix = gauge[0]; iy = gauge[1]
                aux = solution.state.aux[:,ix,iy]
                q = solution.state.q[:,ix,iy]
p=self.compute_gauge_values(q,aux)
t=solution.t
            solution.state.grid.gauge_files[i].write(str(t)+' '+' '.join(str(j) for j in p)+'\n')
r"""Write solution (or derived quantity) values at each gauge coordinate
to file.
"""
for i,gauge in enumerate(solution.state.grid.gauges):
if self.num_dim == 1:
ix=gauge[0];
aux=solution.state.aux[:,ix]
q=solution.state.q[:,ix]
elif self.num_dim == 2:
ix=gauge[0]; iy=gauge[1]
aux=solution.state.aux[:,ix,iy]
q=solution.state.q[:,ix,iy]
p=self.compute_gauge_values(q,aux)
t=solution.t
solution.state.grid.gauge_files[i].write(str(t)+' '+' '.join(str(j) for j in p)+'\n') |
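Each line written above is the time followed by the gauge quantities, space separated, so a gauge file can be read back with numpy; the file name below is hypothetical:

import numpy as np

data = np.loadtxt('gauge0.txt')  # hypothetical gauge output file
t = data[:, 0]                   # first column: time
p = data[:, 1:]                  # remaining columns: compute_gauge_values output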
Python | def wcblast(use_petsc=False,iplot=False,htmlplot=False,outdir='./_output',solver_type='classic'):
"""
Solve the Euler equations of compressible fluid dynamics.
This example involves a pair of interacting shock waves.
The conserved quantities are density, momentum density, and total energy density.
"""
if use_petsc:
import clawpack.petclaw as pyclaw
else:
from clawpack import pyclaw
if solver_type=='sharpclaw':
solver = pyclaw.SharpClawSolver1D()
else:
solver = pyclaw.ClawSolver1D()
from clawpack import riemann
solver.rp = riemann.rp1_euler_with_efix
solver.num_waves = 3
solver.bc_lower[0]=pyclaw.BC.wall
solver.bc_upper[0]=pyclaw.BC.wall
# Initialize domain
    mx = 500
x = pyclaw.Dimension('x',0.0,1.0,mx)
domain = pyclaw.Domain([x])
num_eqn = 3
    state = pyclaw.State(domain,num_eqn)
    # gamma and gamma1 (= gamma - 1) are assumed to be module-level constants
    state.problem_data['gamma'] = gamma
    state.problem_data['gamma1'] = gamma1
    state.q[0,:] = 1.   # density
    state.q[1,:] = 0.   # momentum
    x = state.grid.x.centers
    # Total energy = p/(gamma-1) for a gas at rest; the pressure is 1.e3,
    # 1.e-2, and 1.e2 in the left, middle, and right regions respectively.
    state.q[2,:] = ( (x<0.1)*1.e3 + (0.1<=x)*(x<0.9)*1.e-2 + (0.9<=x)*1.e2 ) / gamma1
    solver.limiters = 4   # 4 = MC limiter
claw = pyclaw.Controller()
claw.tfinal = 0.038
claw.solution = pyclaw.Solution(state,domain)
claw.solver = solver
claw.num_output_times = 10
claw.outdir = outdir
# Solve
status = claw.run()
if htmlplot: pyclaw.plot.html_plot(outdir=outdir)
if iplot: pyclaw.plot.interactive_plot(outdir=outdir)
    return claw.solution.q
"""
Solve the Euler equations of compressible fluid dynamics.
This example involves a pair of interacting shock waves.
The conserved quantities are density, momentum density, and total energy density.
"""
if use_petsc:
import clawpack.petclaw as pyclaw
else:
from clawpack import pyclaw
if solver_type=='sharpclaw':
solver = pyclaw.SharpClawSolver1D()
else:
solver = pyclaw.ClawSolver1D()
from clawpack import riemann
solver.rp = riemann.rp1_euler_with_efix
solver.num_waves = 3
solver.bc_lower[0]=pyclaw.BC.wall
solver.bc_upper[0]=pyclaw.BC.wall
# Initialize domain
mx=500;
x = pyclaw.Dimension('x',0.0,1.0,mx)
domain = pyclaw.Domain([x])
num_eqn = 3
state = pyclaw.State(domain,num_eqn)
state.problem_data['gamma']= gamma
state.problem_data['gamma1']= gamma1
state.q[0,:] = 1.
state.q[1,:] = 0.
x =state.grid.x.centers
state.q[2,:] = ( (x<0.1)*1.e3 + (0.1<=x)*(x<0.9)*1.e-2 + (0.9<=x)*1.e2 ) / gamma1
solver.limiters = 4
claw = pyclaw.Controller()
claw.tfinal = 0.038
claw.solution = pyclaw.Solution(state,domain)
claw.solver = solver
claw.num_output_times = 10
claw.outdir = outdir
# Solve
status = claw.run()
if htmlplot: pyclaw.plot.html_plot(outdir=outdir)
if iplot: pyclaw.plot.interactive_plot(outdir=outdir)
return claw.solution.q |
Python | def write_petsc(solution,frame,path='./',file_prefix='claw',write_aux=False,options={},write_p=False):
r"""
Write out pickle and PETSc data files representing the
solution. Common data is written from process 0 in pickle
files. Shared data is written from all processes into PETSc
data files.
:Input:
- *solution* - (:class:`~pyclaw.solution.Solution`) pyclaw
object to be output
- *frame* - (int) Frame number
- *path* - (string) Root path
- *file_prefix* - (string) Prefix for the file name. ``default =
'claw'``
- *write_aux* - (bool) Boolean controlling whether the associated
auxiliary array should be written out. ``default = False``
- *options* - (dict) Optional argument dictionary, see
`PETScIO Option Table`_
.. _`PETScIO Option Table`:
       format : one of 'ascii', 'binary', or 'vtk'
clobber : if True (Default), files will be overwritten
"""
# Option parsing
option_defaults = {'format':'binary','clobber':True}
    for k in option_defaults.iterkeys():
        if k not in options:
            options[k] = option_defaults[k]
clobber = options['clobber']
pickle_filename = os.path.join(path, '%s.pkl' % file_prefix) + str(frame).zfill(4)
if options['format']=='vtk':
viewer_filename = os.path.join(path, file_prefix+str(frame).zfill(4)+'.vtk')
else:
viewer_filename = os.path.join(path, '%s.ptc' % file_prefix) + str(frame).zfill(4)
if solution.num_aux > 0 and write_aux:
write_aux = True
aux_filename = os.path.join(path, '%s_aux.ptc' % file_prefix)
else:
write_aux = False
if not clobber:
if os.path.exists(pickle_filename):
raise IOError('Cowardly refusing to clobber %s!' % pickle_filename)
if os.path.exists(viewer_filename):
raise IOError('Cowardly refusing to clobber %s!' % viewer_filename)
if write_aux and os.path.exists(aux_filename):
raise IOError('Cowardly refusing to clobber %s!' % aux_filename)
rank = PETSc.Comm.getRank(PETSc.COMM_WORLD)
if rank==0:
pickle_file = open(pickle_filename,'wb')
# explicitly dumping a dictionary here to help out anybody trying to read the pickle file
if write_p:
pickle.dump({'t':solution.t,'num_eqn':solution.mp,'nstates':len(solution.states),
'num_aux':solution.num_aux,'num_dim':solution.domain.num_dim,'write_aux':write_aux,
'problem_data' : solution.problem_data}, pickle_file)
else:
pickle.dump({'t':solution.t,'num_eqn':solution.num_eqn,'nstates':len(solution.states),
'num_aux':solution.num_aux,'num_dim':solution.domain.num_dim,'write_aux':write_aux,
'problem_data' : solution.problem_data}, pickle_file)
# now set up the PETSc viewers
if options['format'] == 'ascii':
viewer = PETSc.Viewer().createASCII(viewer_filename, PETSc.Viewer.Mode.WRITE)
if write_aux:
aux_viewer = PETSc.Viewer().createASCII(aux_filename, PETSc.Viewer.Mode.WRITE)
elif options['format'] == 'binary':
if hasattr(PETSc.Viewer,'createMPIIO'):
viewer = PETSc.Viewer().createMPIIO(viewer_filename, PETSc.Viewer.Mode.WRITE)
else:
viewer = PETSc.Viewer().createBinary(viewer_filename, PETSc.Viewer.Mode.WRITE)
if write_aux:
if hasattr(PETSc.Viewer,'createMPIIO'):
aux_viewer = PETSc.Viewer().createMPIIO(aux_filename, PETSc.Viewer.Mode.WRITE)
else:
aux_viewer = PETSc.Viewer().createBinary(aux_filename, PETSc.Viewer.Mode.WRITE)
elif options['format'] == 'vtk':
viewer = PETSc.Viewer().createASCII(viewer_filename, PETSc.Viewer.Mode.WRITE, format=PETSc.Viewer.Format.ASCII_VTK)
if write_aux:
aux_viewer = PETSc.Viewer().createASCII(aux_filename, PETSc.Viewer.Mode.WRITE)
else:
raise IOError('format type %s not supported' % options['format'])
for state in solution.states:
patch = state.patch
if rank==0:
pickle.dump({'level':patch.level,
'names':patch.name,'lower':patch.lower_global,
'num_cells':patch.num_cells_global,'delta':patch.delta}, pickle_file)
# we will reenable this bad boy when we switch over to petsc-dev
# state.q_da.view(viewer)
if write_p:
state.gpVec.view(viewer)
else:
state.gqVec.view(viewer)
if write_aux:
state.gauxVec.view(aux_viewer)
viewer.flush()
viewer.destroy()
if rank==0:
        pickle_file.close()
r"""
Write out pickle and PETSc data files representing the
solution. Common data is written from process 0 in pickle
files. Shared data is written from all processes into PETSc
data files.
:Input:
- *solution* - (:class:`~pyclaw.solution.Solution`) pyclaw
object to be output
- *frame* - (int) Frame number
- *path* - (string) Root path
- *file_prefix* - (string) Prefix for the file name. ``default =
'claw'``
- *write_aux* - (bool) Boolean controlling whether the associated
auxiliary array should be written out. ``default = False``
- *options* - (dict) Optional argument dictionary, see
`PETScIO Option Table`_
.. _`PETScIO Option Table`:
format : one of 'ascii' or 'binary'
clobber : if True (Default), files will be overwritten
"""
# Option parsing
option_defaults = {'format':'binary','clobber':True}
for k in option_defaults.iterkeys():
if options.has_key(k):
pass
else:
options[k] = option_defaults[k]
clobber = options['clobber']
pickle_filename = os.path.join(path, '%s.pkl' % file_prefix) + str(frame).zfill(4)
if options['format']=='vtk':
viewer_filename = os.path.join(path, file_prefix+str(frame).zfill(4)+'.vtk')
else:
viewer_filename = os.path.join(path, '%s.ptc' % file_prefix) + str(frame).zfill(4)
if solution.num_aux > 0 and write_aux:
write_aux = True
aux_filename = os.path.join(path, '%s_aux.ptc' % file_prefix)
else:
write_aux = False
if not clobber:
if os.path.exists(pickle_filename):
raise IOError('Cowardly refusing to clobber %s!' % pickle_filename)
if os.path.exists(viewer_filename):
raise IOError('Cowardly refusing to clobber %s!' % viewer_filename)
if write_aux and os.path.exists(aux_filename):
raise IOError('Cowardly refusing to clobber %s!' % aux_filename)
rank = PETSc.Comm.getRank(PETSc.COMM_WORLD)
if rank==0:
pickle_file = open(pickle_filename,'wb')
# explicitly dumping a dictionary here to help out anybody trying to read the pickle file
if write_p:
pickle.dump({'t':solution.t,'num_eqn':solution.mp,'nstates':len(solution.states),
'num_aux':solution.num_aux,'num_dim':solution.domain.num_dim,'write_aux':write_aux,
'problem_data' : solution.problem_data}, pickle_file)
else:
pickle.dump({'t':solution.t,'num_eqn':solution.num_eqn,'nstates':len(solution.states),
'num_aux':solution.num_aux,'num_dim':solution.domain.num_dim,'write_aux':write_aux,
'problem_data' : solution.problem_data}, pickle_file)
# now set up the PETSc viewers
if options['format'] == 'ascii':
viewer = PETSc.Viewer().createASCII(viewer_filename, PETSc.Viewer.Mode.WRITE)
if write_aux:
aux_viewer = PETSc.Viewer().createASCII(aux_filename, PETSc.Viewer.Mode.WRITE)
elif options['format'] == 'binary':
if hasattr(PETSc.Viewer,'createMPIIO'):
viewer = PETSc.Viewer().createMPIIO(viewer_filename, PETSc.Viewer.Mode.WRITE)
else:
viewer = PETSc.Viewer().createBinary(viewer_filename, PETSc.Viewer.Mode.WRITE)
if write_aux:
if hasattr(PETSc.Viewer,'createMPIIO'):
aux_viewer = PETSc.Viewer().createMPIIO(aux_filename, PETSc.Viewer.Mode.WRITE)
else:
aux_viewer = PETSc.Viewer().createBinary(aux_filename, PETSc.Viewer.Mode.WRITE)
elif options['format'] == 'vtk':
viewer = PETSc.Viewer().createASCII(viewer_filename, PETSc.Viewer.Mode.WRITE, format=PETSc.Viewer.Format.ASCII_VTK)
if write_aux:
aux_viewer = PETSc.Viewer().createASCII(aux_filename, PETSc.Viewer.Mode.WRITE)
else:
raise IOError('format type %s not supported' % options['format'])
for state in solution.states:
patch = state.patch
if rank==0:
pickle.dump({'level':patch.level,
'names':patch.name,'lower':patch.lower_global,
'num_cells':patch.num_cells_global,'delta':patch.delta}, pickle_file)
# we will reenable this bad boy when we switch over to petsc-dev
# state.q_da.view(viewer)
if write_p:
state.gpVec.view(viewer)
else:
state.gqVec.view(viewer)
if write_aux:
state.gauxVec.view(aux_viewer)
viewer.flush()
viewer.destroy()
if rank==0:
pickle_file.close() |
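For frame 7 with the default prefix this produces the pair claw.pkl0007 and claw.ptc0007. A hedged round-trip sketch, assuming a working petclaw install and an existing solution object:

# Write frame 0 in ASCII instead of the binary default, then read it back.
write_petsc(solution, 0, path='./_output', options={'format': 'ascii'})

import clawpack.petclaw as pyclaw
sol_in = pyclaw.Solution()   # empty Solution to be filled by the reader
read_petsc(sol_in, 0, path='./_output', options={'format': 'ascii'})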
Python | def read_petsc(solution,frame,path='./',file_prefix='claw',read_aux=False,options={}):
r"""
Read in pickles and PETSc data files representing the solution
:Input:
- *solution* - (:class:`~pyclaw.solution.Solution`) Solution object to
read the data into.
- *frame* - (int) Frame number to be read in
- *path* - (string) Path to the current directory of the file
     - *file_prefix* - (string) Prefix of the files to be read in.
       ``default = 'claw'``
- *read_aux* (bool) Whether or not an auxiliary file will try to be read
in. ``default = False``
- *options* - (dict) Optional argument dictionary, see
`PETScIO Option Table`_
.. _`PETScIO Option Table`:
format : one of 'ascii' or 'binary'
"""
# Option parsing
option_defaults = {'format':'binary'}
    for k in option_defaults.iterkeys():
        if k not in options:
            options[k] = option_defaults[k]
pickle_filename = os.path.join(path, '%s.pkl' % file_prefix) + str(frame).zfill(4)
viewer_filename = os.path.join(path, '%s.ptc' % file_prefix) + str(frame).zfill(4)
aux_viewer_filename = os.path.join(path, '%s_aux.ptc' % file_prefix)
if frame < 0:
# Don't construct file names with negative frameno values.
raise IOError("Frame " + str(frame) + " does not exist ***")
pickle_file = open(pickle_filename,'rb')
# this dictionary is mostly holding debugging information, only nstates is needed
# most of this information is explicitly saved in the individual patches
value_dict = pickle.load(pickle_file)
nstates = value_dict['nstates']
num_dim = value_dict['num_dim']
num_aux = value_dict['num_aux']
num_eqn = value_dict['num_eqn']
# now set up the PETSc viewer
if options['format'] == 'ascii':
viewer = PETSc.Viewer().createASCII(viewer_filename, PETSc.Viewer.Mode.READ)
if read_aux:
aux_viewer = PETSc.Viewer().createASCII(aux_viewer_filename, PETSc.Viewer.Mode.READ)
elif options['format'] == 'binary':
if hasattr(PETSc.Viewer,'createMPIIO'):
viewer = PETSc.Viewer().createMPIIO(viewer_filename, PETSc.Viewer.Mode.READ)
else:
viewer = PETSc.Viewer().createBinary(viewer_filename, PETSc.Viewer.Mode.READ)
if read_aux:
if os.path.exists(aux_viewer_filename):
if hasattr(PETSc.Viewer,'createMPIIO'):
aux_viewer = PETSc.Viewer().createMPIIO(aux_viewer_filename, PETSc.Viewer.Mode.READ)
else:
aux_viewer = PETSc.Viewer().createBinary(aux_viewer_filename, PETSc.Viewer.Mode.READ)
else:
                from warnings import warn
                warn('read_aux=True but aux file %s does not exist' % aux_viewer_filename)
                read_aux = False
else:
raise IOError('format type %s not supported' % options['format'])
patches = []
for m in xrange(nstates):
patch_dict = pickle.load(pickle_file)
level = patch_dict['level']
names = patch_dict['names']
lower = patch_dict['lower']
n = patch_dict['num_cells']
d = patch_dict['delta']
from clawpack import petclaw
dimensions = []
for i in xrange(num_dim):
dimensions.append(
#pyclaw.solution.Dimension(names[i],lower[i],lower[i] + n[i]*d[i],n[i]))
petclaw.Dimension(names[i],lower[i],lower[i] + n[i]*d[i],n[i]))
#patch = pyclaw.solution.Patch(dimensions)
patch = petclaw.Patch(dimensions)
patch.level = level
#state = pyclaw.state.State(patch)
state = petclaw.State(patch,num_eqn,num_aux) ##
state.t = value_dict['t']
state.problem_data = value_dict['problem_data']
# DA View/Load is broken in Petsc-3.1.8, we can load/view the DA if needed in petsc-3.2
# state.q_da.load(viewer)
state.gqVec.load(viewer)
if read_aux:
state.gauxVec.load(aux_viewer)
solution.states.append(state)
patches.append(state.patch)
solution.domain = petclaw.geometry.Domain(patches)
pickle_file.close()
viewer.destroy()
if read_aux:
        aux_viewer.destroy()
r"""
Read in pickles and PETSc data files representing the solution
:Input:
- *solution* - (:class:`~pyclaw.solution.Solution`) Solution object to
read the data into.
- *frame* - (int) Frame number to be read in
- *path* - (string) Path to the current directory of the file
- *file_prefix* - (string) Prefix of the files to be read in.
``default = 'fort'``
- *read_aux* (bool) Whether or not an auxiliary file will try to be read
in. ``default = False``
- *options* - (dict) Optional argument dictionary, see
`PETScIO Option Table`_
.. _`PETScIO Option Table`:
format : one of 'ascii' or 'binary'
"""
# Option parsing
option_defaults = {'format':'binary'}
for k in option_defaults.iterkeys():
if options.has_key(k):
pass
else:
options[k] = option_defaults[k]
pickle_filename = os.path.join(path, '%s.pkl' % file_prefix) + str(frame).zfill(4)
viewer_filename = os.path.join(path, '%s.ptc' % file_prefix) + str(frame).zfill(4)
aux_viewer_filename = os.path.join(path, '%s_aux.ptc' % file_prefix)
if frame < 0:
# Don't construct file names with negative frameno values.
raise IOError("Frame " + str(frame) + " does not exist ***")
pickle_file = open(pickle_filename,'rb')
# this dictionary is mostly holding debugging information, only nstates is needed
# most of this information is explicitly saved in the individual patches
value_dict = pickle.load(pickle_file)
nstates = value_dict['nstates']
num_dim = value_dict['num_dim']
num_aux = value_dict['num_aux']
num_eqn = value_dict['num_eqn']
# now set up the PETSc viewer
if options['format'] == 'ascii':
viewer = PETSc.Viewer().createASCII(viewer_filename, PETSc.Viewer.Mode.READ)
if read_aux:
aux_viewer = PETSc.Viewer().createASCII(aux_viewer_filename, PETSc.Viewer.Mode.READ)
elif options['format'] == 'binary':
if hasattr(PETSc.Viewer,'createMPIIO'):
viewer = PETSc.Viewer().createMPIIO(viewer_filename, PETSc.Viewer.Mode.READ)
else:
viewer = PETSc.Viewer().createBinary(viewer_filename, PETSc.Viewer.Mode.READ)
if read_aux:
if os.path.exists(aux_viewer_filename):
if hasattr(PETSc.Viewer,'createMPIIO'):
aux_viewer = PETSc.Viewer().createMPIIO(aux_viewer_filename, PETSc.Viewer.Mode.READ)
else:
aux_viewer = PETSc.Viewer().createBinary(aux_viewer_filename, PETSc.Viewer.Mode.READ)
else:
from warnings import warn
aux_file_path = os.path.join(path,aux_viewer_filename)
warn('read_aux=True but aux file %s does not exist' % aux_file_path)
read_aux=False
else:
raise IOError('format type %s not supported' % options['format'])
patches = []
for m in xrange(nstates):
patch_dict = pickle.load(pickle_file)
level = patch_dict['level']
names = patch_dict['names']
lower = patch_dict['lower']
n = patch_dict['num_cells']
d = patch_dict['delta']
from clawpack import petclaw
dimensions = []
for i in xrange(num_dim):
dimensions.append(
#pyclaw.solution.Dimension(names[i],lower[i],lower[i] + n[i]*d[i],n[i]))
petclaw.Dimension(names[i],lower[i],lower[i] + n[i]*d[i],n[i]))
#patch = pyclaw.solution.Patch(dimensions)
patch = petclaw.Patch(dimensions)
patch.level = level
#state = pyclaw.state.State(patch)
state = petclaw.State(patch,num_eqn,num_aux) ##
state.t = value_dict['t']
state.problem_data = value_dict['problem_data']
# DA View/Load is broken in Petsc-3.1.8, we can load/view the DA if needed in petsc-3.2
# state.q_da.load(viewer)
state.gqVec.load(viewer)
if read_aux:
state.gauxVec.load(aux_viewer)
solution.states.append(state)
patches.append(state.patch)
solution.domain = petclaw.geometry.Domain(patches)
pickle_file.close()
viewer.destroy()
if read_aux:
aux_viewer.destroy() |
Python | def read_petsc_t(frame,path='./',file_prefix='claw'):
r"""Read only the petsc.pkl file and return the data
:Input:
- *frame* - (int) Frame number to be read in
- *path* - (string) Path to the current directory of the file
- *file_prefix* - (string) Prefix of the files to be read in.
``default = 'claw'``
:Output:
- (list) List of output variables
      - *t* - (float) Time of frame
      - *num_eqn* - (int) Number of equations in the frame
      - *nstates* - (int) Number of states
      - *num_aux* - (int) Number of auxiliary variables in the frame
      - *num_dim* - (int) Number of dimensions in q and aux
"""
base_path = os.path.join(path,)
path = os.path.join(base_path, '%s.pkl' % file_prefix) + str(frame).zfill(4)
try:
f = open(path,'rb')
logger.debug("Opening %s file." % path)
patch_dict = pickle.load(f)
t = patch_dict['t']
num_eqn = patch_dict['num_eqn']
nstates = patch_dict['nstates']
num_aux = patch_dict['num_aux']
num_dim = patch_dict['num_dim']
f.close()
    except IOError:
raise
except:
logger.error("File " + path + " should contain t, num_eqn, npatches, num_aux, num_dim")
print "File " + path + " should contain t, num_eqn, npatches, num_aux, num_dim"
raise
    return t,num_eqn,nstates,num_aux,num_dim
r"""Read only the petsc.pkl file and return the data
:Input:
- *frame* - (int) Frame number to be read in
- *path* - (string) Path to the current directory of the file
- *file_prefix* - (string) Prefix of the files to be read in.
``default = 'claw'``
:Output:
- (list) List of output variables
- *t* - (int) Time of frame
- *num_eqn* - (int) Number of equations in the frame
- *npatches* - (int) Number of patches
- *num_aux* - (int) Auxillary value in the frame
- *num_dim* - (int) Number of dimensions in q and aux
"""
base_path = os.path.join(path,)
path = os.path.join(base_path, '%s.pkl' % file_prefix) + str(frame).zfill(4)
try:
f = open(path,'rb')
logger.debug("Opening %s file." % path)
patch_dict = pickle.load(f)
t = patch_dict['t']
num_eqn = patch_dict['num_eqn']
nstates = patch_dict['nstates']
num_aux = patch_dict['num_aux']
num_dim = patch_dict['num_dim']
f.close()
except(IOError):
raise
except:
logger.error("File " + path + " should contain t, num_eqn, npatches, num_aux, num_dim")
print "File " + path + " should contain t, num_eqn, npatches, num_aux, num_dim"
raise
return t,num_eqn,nstates,num_aux,num_dim |
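A quick metadata query that touches only the pickle file, never the PETSc vectors; the output path is illustrative:

t, num_eqn, nstates, num_aux, num_dim = read_petsc_t(0, path='./_output')
print 'frame 0: t = %s, %d equation(s), %d state(s)' % (t, num_eqn, nstates)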
Python | def acoustics(use_petsc=False,kernel_language='Fortran',solver_type='classic',iplot=False,htmlplot=False,outdir='./_output',weno_order=5):
"""
This example solves the 1-dimensional acoustics equations in a homogeneous
medium.
"""
from numpy import sqrt, exp, cos
#=================================================================
# Import the appropriate classes, depending on the options passed
#=================================================================
if use_petsc:
import clawpack.petclaw as pyclaw
else:
from clawpack import pyclaw
if solver_type=='classic':
solver = pyclaw.ClawSolver1D()
elif solver_type=='sharpclaw':
solver = pyclaw.SharpClawSolver1D()
solver.weno_order=weno_order
else: raise Exception('Unrecognized value of solver_type.')
#========================================================================
    # Define the system of equations to be solved and set algorithm options
#========================================================================
solver.kernel_language=kernel_language
from clawpack.riemann import rp_acoustics
solver.num_waves=rp_acoustics.num_waves
if kernel_language=='Python':
solver.rp = rp_acoustics.rp_acoustics_1d
else:
from clawpack.riemann import rp1_acoustics
solver.rp = rp1_acoustics
solver.limiters = pyclaw.limiters.tvd.MC
solver.bc_lower[0] = pyclaw.BC.periodic
solver.bc_upper[0] = pyclaw.BC.periodic
#========================================================================
# Instantiate the domain and set the boundary conditions
#========================================================================
x = pyclaw.Dimension('x',0.0,1.0,100)
domain = pyclaw.Domain(x)
num_eqn = 2
state = pyclaw.State(domain,num_eqn)
#========================================================================
# Set problem-specific variables
#========================================================================
rho = 1.0
bulk = 1.0
state.problem_data['rho']=rho
state.problem_data['bulk']=bulk
state.problem_data['zz']=sqrt(rho*bulk)
state.problem_data['cc']=sqrt(bulk/rho)
#========================================================================
# Set the initial condition
#========================================================================
xc=domain.grid.x.centers
beta=100; gamma=0; x0=0.75
state.q[0,:] = exp(-beta * (xc-x0)**2) * cos(gamma * (xc - x0))
state.q[1,:] = 0.
    # Initial dt: 0.1 * (cell width) / (sound speed), i.e. CFL number 0.1
    solver.dt_initial = domain.grid.delta[0]/state.problem_data['cc']*0.1
#========================================================================
# Set up the controller object
#========================================================================
claw = pyclaw.Controller()
claw.solution = pyclaw.Solution(state,domain)
claw.solver = solver
claw.outdir = outdir
claw.keep_copy = True
claw.num_output_times = 5
claw.tfinal = 1.0
# Solve
status = claw.run()
# Plot results
if htmlplot: pyclaw.plot.html_plot(outdir=outdir)
if iplot: pyclaw.plot.interactive_plot(outdir=outdir)
    return claw
"""
This example solves the 1-dimensional acoustics equations in a homogeneous
medium.
"""
from numpy import sqrt, exp, cos
#=================================================================
# Import the appropriate classes, depending on the options passed
#=================================================================
if use_petsc:
import clawpack.petclaw as pyclaw
else:
from clawpack import pyclaw
if solver_type=='classic':
solver = pyclaw.ClawSolver1D()
elif solver_type=='sharpclaw':
solver = pyclaw.SharpClawSolver1D()
solver.weno_order=weno_order
else: raise Exception('Unrecognized value of solver_type.')
#========================================================================
# Instantiate the solver and define the system of equations to be solved
#========================================================================
solver.kernel_language=kernel_language
from clawpack.riemann import rp_acoustics
solver.num_waves=rp_acoustics.num_waves
if kernel_language=='Python':
solver.rp = rp_acoustics.rp_acoustics_1d
else:
from clawpack.riemann import rp1_acoustics
solver.rp = rp1_acoustics
solver.limiters = pyclaw.limiters.tvd.MC
solver.bc_lower[0] = pyclaw.BC.periodic
solver.bc_upper[0] = pyclaw.BC.periodic
#========================================================================
# Instantiate the domain and set the boundary conditions
#========================================================================
x = pyclaw.Dimension('x',0.0,1.0,100)
domain = pyclaw.Domain(x)
num_eqn = 2
state = pyclaw.State(domain,num_eqn)
#========================================================================
# Set problem-specific variables
#========================================================================
rho = 1.0
bulk = 1.0
state.problem_data['rho']=rho
state.problem_data['bulk']=bulk
state.problem_data['zz']=sqrt(rho*bulk)
state.problem_data['cc']=sqrt(bulk/rho)
#========================================================================
# Set the initial condition
#========================================================================
xc=domain.grid.x.centers
beta=100; gamma=0; x0=0.75
state.q[0,:] = exp(-beta * (xc-x0)**2) * cos(gamma * (xc - x0))
state.q[1,:] = 0.
solver.dt_initial=domain.grid.delta[0]/state.problem_data['cc']*0.1
#========================================================================
# Set up the controller object
#========================================================================
claw = pyclaw.Controller()
claw.solution = pyclaw.Solution(state,domain)
claw.solver = solver
claw.outdir = outdir
claw.keep_copy = True
claw.num_output_times = 5
claw.tfinal = 1.0
# Solve
status = claw.run()
# Plot results
if htmlplot: pyclaw.plot.html_plot(outdir=outdir)
if iplot: pyclaw.plot.interactive_plot(outdir=outdir)
return claw |
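Because keep_copy is True the returned controller holds every output frame in memory; a hedged sketch of inspecting the final frame:

claw = acoustics(solver_type='sharpclaw', weno_order=5)
final = claw.frames[-1]
pressure = final.state.q[0, :]  # q[0] is pressure, q[1] is velocity
print 'max |p| at t = %s is %s' % (final.t, abs(pressure).max())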
Python | def acoustics2D(kernel_language='Fortran',iplot=False,htmlplot=False,use_petsc=False,outdir='./_output',solver_type='classic'):
"""
Example python script for solving the 2d acoustics equations.
"""
if use_petsc:
import clawpack.petclaw as pyclaw
else:
from clawpack import pyclaw
if solver_type=='classic':
solver=pyclaw.ClawSolver2D()
solver.dimensional_split=False
solver.limiters = pyclaw.limiters.tvd.MC
elif solver_type=='sharpclaw':
solver=pyclaw.SharpClawSolver2D()
from clawpack import riemann
solver.rp = riemann.rp2_vc_acoustics
solver.num_waves = 2
solver.bc_lower[0]=pyclaw.BC.wall
solver.bc_upper[0]=pyclaw.BC.extrap
solver.bc_lower[1]=pyclaw.BC.wall
solver.bc_upper[1]=pyclaw.BC.extrap
solver.aux_bc_lower[0]=pyclaw.BC.wall
solver.aux_bc_upper[0]=pyclaw.BC.extrap
solver.aux_bc_lower[1]=pyclaw.BC.wall
solver.aux_bc_upper[1]=pyclaw.BC.extrap
# Initialize domain
mx=200; my=200
x = pyclaw.Dimension('x',-1.0,1.0,mx)
y = pyclaw.Dimension('y',-1.0,1.0,my)
domain = pyclaw.Domain([x,y])
num_eqn = 3
num_aux = 2 # density, sound speed
state = pyclaw.State(domain,num_eqn,num_aux)
# Cell centers coordinates
grid = state.grid
Y,X = np.meshgrid(grid.y.centers,grid.x.centers)
# Set aux arrays
rhol = 4.0
rhor = 1.0
bulkl = 4.0
bulkr = 4.0
cl = np.sqrt(bulkl/rhol)
cr = np.sqrt(bulkr/rhor)
state.aux[0,:,:] = rhol*(X<0.) + rhor*(X>=0.) # Density
state.aux[1,:,:] = cl*(X<0.) + cr*(X>=0.) # Sound speed
# Set initial condition
x0 = -0.5; y0 = 0.
r = np.sqrt((X-x0)**2 + (Y-y0)**2)
width=0.1; rad=0.25
state.q[0,:,:] = (np.abs(r-rad)<=width)*(1.+np.cos(np.pi*(r-rad)/width))
state.q[1,:,:] = 0.
state.q[2,:,:] = 0.
claw = pyclaw.Controller()
claw.keep_copy = True
claw.solution = pyclaw.Solution(state,domain)
claw.solver = solver
claw.outdir=outdir
claw.num_output_times = 20
# Solve
claw.tfinal = 0.6
status = claw.run()
if htmlplot: pyclaw.plot.html_plot(outdir=outdir,file_format=claw.output_format)
if iplot: pyclaw.plot.interactive_plot(outdir=outdir,file_format=claw.output_format)
    return claw
"""
Example python script for solving the 2d acoustics equations.
"""
if use_petsc:
import clawpack.petclaw as pyclaw
else:
from clawpack import pyclaw
if solver_type=='classic':
solver=pyclaw.ClawSolver2D()
solver.dimensional_split=False
solver.limiters = pyclaw.limiters.tvd.MC
elif solver_type=='sharpclaw':
solver=pyclaw.SharpClawSolver2D()
from clawpack import riemann
solver.rp = riemann.rp2_vc_acoustics
solver.num_waves = 2
solver.bc_lower[0]=pyclaw.BC.wall
solver.bc_upper[0]=pyclaw.BC.extrap
solver.bc_lower[1]=pyclaw.BC.wall
solver.bc_upper[1]=pyclaw.BC.extrap
solver.aux_bc_lower[0]=pyclaw.BC.wall
solver.aux_bc_upper[0]=pyclaw.BC.extrap
solver.aux_bc_lower[1]=pyclaw.BC.wall
solver.aux_bc_upper[1]=pyclaw.BC.extrap
# Initialize domain
mx=200; my=200
x = pyclaw.Dimension('x',-1.0,1.0,mx)
y = pyclaw.Dimension('y',-1.0,1.0,my)
domain = pyclaw.Domain([x,y])
num_eqn = 3
num_aux = 2 # density, sound speed
state = pyclaw.State(domain,num_eqn,num_aux)
# Cell centers coordinates
grid = state.grid
Y,X = np.meshgrid(grid.y.centers,grid.x.centers)
# Set aux arrays
rhol = 4.0
rhor = 1.0
bulkl = 4.0
bulkr = 4.0
cl = np.sqrt(bulkl/rhol)
cr = np.sqrt(bulkr/rhor)
state.aux[0,:,:] = rhol*(X<0.) + rhor*(X>=0.) # Density
state.aux[1,:,:] = cl*(X<0.) + cr*(X>=0.) # Sound speed
# Set initial condition
x0 = -0.5; y0 = 0.
r = np.sqrt((X-x0)**2 + (Y-y0)**2)
width=0.1; rad=0.25
state.q[0,:,:] = (np.abs(r-rad)<=width)*(1.+np.cos(np.pi*(r-rad)/width))
state.q[1,:,:] = 0.
state.q[2,:,:] = 0.
claw = pyclaw.Controller()
claw.keep_copy = True
claw.solution = pyclaw.Solution(state,domain)
claw.solver = solver
claw.outdir=outdir
claw.num_output_times = 20
# Solve
claw.tfinal = 0.6
status = claw.run()
if htmlplot: pyclaw.plot.html_plot(outdir=outdir,file_format=claw.output_format)
if iplot: pyclaw.plot.interactive_plot(outdir=outdir,file_format=claw.output_format)
return claw |
Python | def stegoton(use_petsc=0,kernel_language='Fortran',solver_type='classic',iplot=0,htmlplot=0,outdir='./_output'):
"""
Stegoton problem.
Nonlinear elasticity in periodic medium.
See LeVeque & Yong (2003).
$$\\epsilon_t - u_x = 0$$
$$\\rho(x) u_t - \\sigma(\\epsilon,x)_x = 0$$
"""
vary_Z=False
if use_petsc:
import clawpack.petclaw as pyclaw
else:
from clawpack import pyclaw
if solver_type=='sharpclaw':
solver = pyclaw.SharpClawSolver1D()
else:
solver = pyclaw.ClawSolver1D()
solver.kernel_language = kernel_language
if kernel_language=='Python':
solver.set_riemann_solver('nonlinear_elasticity')
elif kernel_language=='Fortran':
from clawpack import riemann
solver.rp = riemann.rp1_nonlinear_elasticity_fwave
solver.bc_lower[0] = pyclaw.BC.periodic
solver.bc_upper[0] = pyclaw.BC.periodic
    #Use the same BCs for the aux array
    solver.aux_bc_lower = solver.bc_lower
    solver.aux_bc_upper = solver.bc_upper
xlower=0.0; xupper=600.0
cellsperlayer=6; mx=int(round(xupper-xlower))*cellsperlayer
x = pyclaw.Dimension('x',xlower,xupper,mx)
domain = pyclaw.Domain(x)
num_eqn = 2
state = pyclaw.State(domain,num_eqn)
#Set global parameters
alpha = 0.5
KA = 1.0
KB = 4.0
rhoA = 1.0
rhoB = 4.0
state.problem_data = {}
state.problem_data['t1'] = 10.0
state.problem_data['tw1'] = 10.0
state.problem_data['a1'] = 0.0
state.problem_data['alpha'] = alpha
state.problem_data['KA'] = KA
state.problem_data['KB'] = KB
state.problem_data['rhoA'] = rhoA
state.problem_data['rhoB'] = rhoB
state.problem_data['trtime'] = 250.0
state.problem_data['trdone'] = False
#Initialize q and aux
xc=state.grid.x.centers
state.aux=setaux(xc,rhoB,KB,rhoA,KA,alpha,solver.aux_bc_lower[0],xupper=xupper)
qinit(state,ic=2,a2=1.0,xupper=xupper)
tfinal=500.; num_output_times = 10;
solver.max_steps = 5000000
solver.fwave = True
solver.before_step = b4step
solver.user_bc_lower=moving_wall_bc
solver.user_bc_upper=zero_bc
solver.num_waves=2
if solver_type=='sharpclaw':
solver.lim_type = 2
solver.char_decomp=0
claw = pyclaw.Controller()
claw.keep_copy = False
claw.output_style = 1
claw.num_output_times = num_output_times
claw.tfinal = tfinal
claw.solution = pyclaw.Solution(state,domain)
claw.solver = solver
if vary_Z==True:
#Zvalues = [1.0,1.2,1.4,1.6,1.8,2.0,2.2,2.4,2.6,2.8,3.0,3.5,4.0]
Zvalues = [3.5,4.0]
#a2values= [0.9766161130681, 1.0888194560100042, 1.1601786315361329, 1.20973731651806, 1.2462158254919984]
for ii,Z in enumerate(Zvalues):
a2=1.0 #a2values[ii]
KB = Z
rhoB = Z
state.problem_data['KB'] = KB
state.problem_data['rhoB'] = rhoB
state.problem_data['trdone'] = False
state.aux=setaux(xc,rhoB,KB,rhoA,KA,alpha,bc_lower,xupper=xupper)
patch.x.bc_lower=2
patch.x.bc_upper=2
state.t = 0.0
qinit(state,ic=2,a2=a2)
init_solution = Solution(state,domain)
claw.solution = init_solution
claw.solution.t = 0.0
claw.tfinal = tfinal
claw.outdir = './_output_Z'+str(Z)+'_'+str(cellsperlayer)
status = claw.run()
else:
# Solve
status = claw.run()
if htmlplot: pyclaw.plot.html_plot(outdir=outdir)
if iplot: pyclaw.plot.interactive_plot(outdir=outdir) | def stegoton(use_petsc=0,kernel_language='Fortran',solver_type='classic',iplot=0,htmlplot=0,outdir='./_output'):
"""
Stegoton problem.
Nonlinear elasticity in periodic medium.
See LeVeque & Yong (2003).
$$\\epsilon_t - u_x = 0$$
$$\\rho(x) u_t - \\sigma(\\epsilon,x)_x = 0$$
"""
vary_Z=False
if use_petsc:
import clawpack.petclaw as pyclaw
else:
from clawpack import pyclaw
if solver_type=='sharpclaw':
solver = pyclaw.SharpClawSolver1D()
else:
solver = pyclaw.ClawSolver1D()
solver.kernel_language = kernel_language
if kernel_language=='Python':
solver.set_riemann_solver('nonlinear_elasticity')
elif kernel_language=='Fortran':
from clawpack import riemann
solver.rp = riemann.rp1_nonlinear_elasticity_fwave
solver.bc_lower[0] = pyclaw.BC.periodic
solver.bc_upper[0] = pyclaw.BC.periodic
#Use the same BCs for the aux array
solver.aux_bc_lower = solver.bc_lower
solver.aux_bc_upper = solver.bc_lower
xlower=0.0; xupper=600.0
cellsperlayer=6; mx=int(round(xupper-xlower))*cellsperlayer
x = pyclaw.Dimension('x',xlower,xupper,mx)
domain = pyclaw.Domain(x)
num_eqn = 2
state = pyclaw.State(domain,num_eqn)
#Set global parameters
alpha = 0.5
KA = 1.0
KB = 4.0
rhoA = 1.0
rhoB = 4.0
state.problem_data = {}
state.problem_data['t1'] = 10.0
state.problem_data['tw1'] = 10.0
state.problem_data['a1'] = 0.0
state.problem_data['alpha'] = alpha
state.problem_data['KA'] = KA
state.problem_data['KB'] = KB
state.problem_data['rhoA'] = rhoA
state.problem_data['rhoB'] = rhoB
state.problem_data['trtime'] = 250.0
state.problem_data['trdone'] = False
#Initialize q and aux
xc=state.grid.x.centers
state.aux=setaux(xc,rhoB,KB,rhoA,KA,alpha,solver.aux_bc_lower[0],xupper=xupper)
qinit(state,ic=2,a2=1.0,xupper=xupper)
tfinal=500.; num_output_times = 10;
solver.max_steps = 5000000
solver.fwave = True
solver.before_step = b4step
solver.user_bc_lower=moving_wall_bc
solver.user_bc_upper=zero_bc
solver.num_waves=2
if solver_type=='sharpclaw':
solver.lim_type = 2
solver.char_decomp=0
claw = pyclaw.Controller()
claw.keep_copy = False
claw.output_style = 1
claw.num_output_times = num_output_times
claw.tfinal = tfinal
claw.solution = pyclaw.Solution(state,domain)
claw.solver = solver
if vary_Z==True:
#Zvalues = [1.0,1.2,1.4,1.6,1.8,2.0,2.2,2.4,2.6,2.8,3.0,3.5,4.0]
Zvalues = [3.5,4.0]
#a2values= [0.9766161130681, 1.0888194560100042, 1.1601786315361329, 1.20973731651806, 1.2462158254919984]
for ii,Z in enumerate(Zvalues):
a2=1.0 #a2values[ii]
KB = Z
rhoB = Z
state.problem_data['KB'] = KB
state.problem_data['rhoB'] = rhoB
state.problem_data['trdone'] = False
state.aux=setaux(xc,rhoB,KB,rhoA,KA,alpha,bc_lower,xupper=xupper)
patch.x.bc_lower=2
patch.x.bc_upper=2
state.t = 0.0
qinit(state,ic=2,a2=a2)
init_solution = Solution(state,domain)
claw.solution = init_solution
claw.solution.t = 0.0
claw.tfinal = tfinal
claw.outdir = './_output_Z'+str(Z)+'_'+str(cellsperlayer)
status = claw.run()
else:
# Solve
status = claw.run()
if htmlplot: pyclaw.plot.html_plot(outdir=outdir)
if iplot: pyclaw.plot.interactive_plot(outdir=outdir) |
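# The stegoton record above relies on helper callbacks defined elsewhere in
# the original script (setaux, qinit, b4step, moving_wall_bc, zero_bc). As a
# hypothetical sketch of the simplest one, a zero_bc consistent with the
# custom-BC signature used throughout these examples could look like this:
def zero_bc(state, dim, t, qbc, num_ghost):
    """Fill the upper ghost cells of every field with zeros (sketch, not the original helper)."""
    qbc[:, -num_ghost:] = 0.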
Python | def acoustics3D(iplot=False,htmlplot=False,use_petsc=False,outdir='./_output',solver_type='classic',**kwargs):
    """
    Example python script for solving the 3d acoustics equations.
    """
    #===========================================================================
    # Global variables
    #===========================================================================
    global INIT_MIN_MESH_WIDTH
    global SUBDIVISION_FACTOR
    global XI
    global XF
    global TFINAL
    global NUM_OUTPUT_TIMES

    #===========================================================================
    # Import libraries
    #===========================================================================
    if use_petsc:
        import clawpack.petclaw as pyclaw
    else:
        from clawpack import pyclaw

    #===========================================================================
    # Setup solver and solver parameters
    #===========================================================================
    if solver_type=='classic':
        solver = pyclaw.ClawSolver3D()
    else:
        raise Exception('Unrecognized solver_type.')

    from clawpack import riemann

    # Peano solver
    peanoSolver = peanoclaw.Solver(solver,
                                   INIT_MIN_MESH_WIDTH,
                                   init,
                                   refinement_criterion=refinement_criterion)

    solver.rp = riemann.rp3_vc_acoustics
    solver.num_waves = 2
    solver.limiters = pyclaw.limiters.tvd.MC

    solver.bc_lower[0] = pyclaw.BC.extrap
    solver.bc_upper[0] = pyclaw.BC.extrap
    solver.bc_lower[1] = pyclaw.BC.extrap
    solver.bc_upper[1] = pyclaw.BC.extrap
    solver.bc_lower[2] = pyclaw.BC.extrap
    solver.bc_upper[2] = pyclaw.BC.extrap

    solver.aux_bc_lower[0] = pyclaw.BC.extrap
    solver.aux_bc_upper[0] = pyclaw.BC.extrap
    solver.aux_bc_lower[1] = pyclaw.BC.extrap
    solver.aux_bc_upper[1] = pyclaw.BC.extrap
    solver.aux_bc_lower[2] = pyclaw.BC.extrap
    solver.aux_bc_upper[2] = pyclaw.BC.extrap

    solver.dimensional_split = True

    #===========================================================================
    # Initialize domain and state, then initialize the solution associated to the
    # state and finally initialize the aux array
    #===========================================================================
    # Initialize domain
    mx = SUBDIVISION_FACTOR
    my = SUBDIVISION_FACTOR
    mz = SUBDIVISION_FACTOR
    x = pyclaw.Dimension('x', XI, XF, mx)
    y = pyclaw.Dimension('y', XI, XF, my)
    z = pyclaw.Dimension('z', XI, XF, mz)
    domain = pyclaw.Domain([x,y,z])

    num_eqn = 4
    num_aux = 2  # density, sound speed
    state = pyclaw.State(domain,num_eqn,num_aux)

    #===========================================================================
    # Set up controller and controller parameters
    #===========================================================================
    claw = pyclaw.Controller()
    claw.tfinal = TFINAL
    claw.keep_copy = True
    claw.solution = peanoclaw.solution.Solution(state,domain)  #pyclaw.Solution(state,domain)
    claw.solver = peanoSolver  #solver
    claw.outdir = outdir
    claw.num_output_times = NUM_OUTPUT_TIMES

    #===========================================================================
    # Solve the problem
    #===========================================================================
    status = claw.run()
"""
Example python script for solving the 3d acoustics equations.
"""
#===========================================================================
# Global variables
#===========================================================================
global INIT_MIN_MESH_WIDTH
global SUBDIVISION_FACTOR
global XI
global XF
global TFINAL
global NUM_OUTPUT_TIMES
#===========================================================================
# Import libraries
#===========================================================================
if use_petsc:
import clawpack.petclaw as pyclaw
else:
from clawpack import pyclaw
#===========================================================================
# Setup solver and solver parameters
#===========================================================================
if solver_type=='classic':
solver=pyclaw.ClawSolver3D()
else:
raise Exception('Unrecognized solver_type.')
from clawpack import riemann
# Peano Solver
peanoSolver = peanoclaw.Solver(solver,
INIT_MIN_MESH_WIDTH,
init,
refinement_criterion=refinement_criterion
)
solver.rp = riemann.rp3_vc_acoustics
solver.num_waves = 2
solver.limiters = pyclaw.limiters.tvd.MC
solver.bc_lower[0]=pyclaw.BC.extrap
solver.bc_upper[0]=pyclaw.BC.extrap
solver.bc_lower[1]=pyclaw.BC.extrap
solver.bc_upper[1]=pyclaw.BC.extrap
solver.bc_lower[2]=pyclaw.BC.extrap
solver.bc_upper[2]=pyclaw.BC.extrap
solver.aux_bc_lower[0]=pyclaw.BC.extrap
solver.aux_bc_upper[0]=pyclaw.BC.extrap
solver.aux_bc_lower[1]=pyclaw.BC.extrap
solver.aux_bc_upper[1]=pyclaw.BC.extrap
solver.aux_bc_lower[2]=pyclaw.BC.extrap
solver.aux_bc_upper[2]=pyclaw.BC.extrap
solver.dimensional_split=True
solver.limiters = pyclaw.limiters.tvd.MC
#===========================================================================
# Initialize domain and state, then initialize the solution associated to the
# state and finally initialize aux array
#===========================================================================
# Initialize domain
mx = SUBDIVISION_FACTOR
my = SUBDIVISION_FACTOR
mz = SUBDIVISION_FACTOR
x = pyclaw.Dimension('x', XI, XF, mx)
y = pyclaw.Dimension('y', XI, XF, my)
z = pyclaw.Dimension('z', XI, XF, mz)
domain = pyclaw.Domain([x,y,z])
num_eqn = 4
num_aux = 2 # density, sound speed
state = pyclaw.State(domain,num_eqn,num_aux)
#===========================================================================
# Set up controller and controller parameters
#===========================================================================
claw = pyclaw.Controller()
claw.tfinal = TFINAL
claw.keep_copy = True
claw.solution = peanoclaw.solution.Solution(state,domain) #pyclaw.Solution(state,domain)
claw.solver = peanoSolver #solver
claw.outdir=outdir
claw.num_output_times = NUM_OUTPUT_TIMES
#===========================================================================
# Solve the problem
#===========================================================================
status = claw.run() |
Python | def step(self, maximum_timestep_size, estimated_next_dt):
    r"""
    Performs one timestep on the subgrid. This might result in several runs of the
    solver to find the maximum allowed timestep size in terms of stability.

    :Input:
     - *maximum_timestep_size* - This is the maximum allowed timestep size in terms
                                 of the grid topology and the global timestep, i.e.
                                 neighboring subgrids might forbid a larger timestep on
                                 this subgrid. Also, this subgrid is not allowed to
                                 advance further than the global timestep.
     - *estimated_next_dt* - This is the estimate of the maximum allowed timestep size
                             in terms of stability; it results from the CFL number of
                             the last timestep performed on this grid.
    """
    self.solver.dt = min(maximum_timestep_size, estimated_next_dt)

    # Set qbc and the maximum timestep for the current patch
    self.solver.qbc = self.qbc
    self.solver.dt_max = maximum_timestep_size

    self.solver.evolve_to_time(self.solution)
    return self.solution.state.q
r"""
Performs one timestep on the subgrid. This might result in several runs of the
solver to find the maximum allowed timestep size in terms of stability.
:Input:
- *maximum_timestep_size* - This is the maximum allowed timestep size in terms
of the grid topology and the global timestep. I.e.
neighboring subgrids might forbid a timestep on this
subgrid. Also this subgrid is not allowed to advance
further than the the global timestep.
- *estimated_next_dt*- This is the estimation for the maximum allowed timestep size
in terms of stability and results from the cfl number of the
last timestep performed on this grid.
"""
self.solver.dt = min(maximum_timestep_size, estimated_next_dt)
# Set qbc and timestep for the current patch
self.solver.qbc = self.qbc
self.solver.dt_max = maximum_timestep_size
self.solver.evolve_to_time(self.solution)
return self.solution.state.q |
Python | def acoustics3D(iplot=False,htmlplot=False,use_petsc=False,outdir='./_output',solver_type='classic',**kwargs):
    """
    Example python script for solving the 3d acoustics equations.
    """
    #===========================================================================
    # Import libraries
    #===========================================================================
    global INIT_MIN_MESH_WIDTH
    global NUM_OUTPUT_TIMES
    global TFINAL

    if use_petsc:
        import clawpack.petclaw as pyclaw
    else:
        from clawpack import pyclaw

    #===========================================================================
    # Setup solver and solver parameters
    #===========================================================================
    if solver_type=='classic':
        solver = pyclaw.ClawSolver3D()
    else:
        raise Exception('Unrecognized solver_type.')

    from clawpack import riemann

    # Peano solver
    peanoSolver = peanoclaw.Solver(solver,
                                   INIT_MIN_MESH_WIDTH,
                                   init,
                                   refinement_criterion=refinement_criterion)

    solver.rp = riemann.rp3_vc_acoustics
    solver.num_waves = 2
    solver.limiters = pyclaw.limiters.tvd.MC

    solver.bc_lower[0] = pyclaw.BC.extrap
    solver.bc_upper[0] = pyclaw.BC.extrap
    solver.bc_lower[1] = pyclaw.BC.extrap
    solver.bc_upper[1] = pyclaw.BC.extrap
    solver.bc_lower[2] = pyclaw.BC.extrap
    solver.bc_upper[2] = pyclaw.BC.extrap

    solver.aux_bc_lower[0] = pyclaw.BC.extrap
    solver.aux_bc_upper[0] = pyclaw.BC.extrap
    solver.aux_bc_lower[1] = pyclaw.BC.extrap
    solver.aux_bc_upper[1] = pyclaw.BC.extrap
    solver.aux_bc_lower[2] = pyclaw.BC.extrap
    solver.aux_bc_upper[2] = pyclaw.BC.extrap

    solver.dimensional_split = True

    #===========================================================================
    # Initialize domain and state, then initialize the solution associated to the
    # state and finally initialize the aux array
    #===========================================================================
    # Initialize domain
    mx = SUBDIVISION_FACTOR
    my = SUBDIVISION_FACTOR
    mz = SUBDIVISION_FACTOR
    x = pyclaw.Dimension('x', XI, XF, mx)
    y = pyclaw.Dimension('y', XI, XF, my)
    z = pyclaw.Dimension('z', XI, XF, mz)
    domain = pyclaw.Domain([x,y,z])

    num_eqn = 4
    num_aux = 2  # density, sound speed
    state = pyclaw.State(domain,num_eqn,num_aux)

    #===========================================================================
    # Set up controller and controller parameters
    #===========================================================================
    claw = pyclaw.Controller()
    claw.tfinal = TFINAL
    claw.keep_copy = True
    claw.solution = peanoclaw.solution.Solution(state,domain)  #pyclaw.Solution(state,domain)
    claw.solver = peanoSolver  #solver
    claw.outdir = outdir
    claw.num_output_times = NUM_OUTPUT_TIMES

    #solver.before_step = _probe
    status = claw.run()

    return claw
"""
Example python script for solving the 3d acoustics equations.
"""
#===========================================================================
# Import libraries
#===========================================================================
global INIT_MIN_MESH_WIDTH
global NUM_OUTPUT_TIMES
global TFINAL
if use_petsc:
import clawpack.petclaw as pyclaw
else:
from clawpack import pyclaw
#===========================================================================
# Setup solver and solver parameters
#===========================================================================
if solver_type=='classic':
solver=pyclaw.ClawSolver3D()
else:
raise Exception('Unrecognized solver_type.')
from clawpack import riemann
# Peano Solver
peanoSolver = peanoclaw.Solver(solver,
INIT_MIN_MESH_WIDTH,
init,
refinement_criterion=refinement_criterion
)
solver.rp = riemann.rp3_vc_acoustics
solver.num_waves = 2
solver.limiters = pyclaw.limiters.tvd.MC
solver.bc_lower[0]=pyclaw.BC.extrap
solver.bc_upper[0]=pyclaw.BC.extrap
solver.bc_lower[1]=pyclaw.BC.extrap
solver.bc_upper[1]=pyclaw.BC.extrap
solver.bc_lower[2]=pyclaw.BC.extrap
solver.bc_upper[2]=pyclaw.BC.extrap
solver.aux_bc_lower[0]=pyclaw.BC.extrap
solver.aux_bc_upper[0]=pyclaw.BC.extrap
solver.aux_bc_lower[1]=pyclaw.BC.extrap
solver.aux_bc_upper[1]=pyclaw.BC.extrap
solver.aux_bc_lower[2]=pyclaw.BC.extrap
solver.aux_bc_upper[2]=pyclaw.BC.extrap
solver.dimensional_split=True
solver.limiters = pyclaw.limiters.tvd.MC
#===========================================================================
# Initialize domain and state, then initialize the solution associated to the
# state and finally initialize aux array
#===========================================================================
# Initialize domain
mx = SUBDIVISION_FACTOR
my = SUBDIVISION_FACTOR
mz = SUBDIVISION_FACTOR
x = pyclaw.Dimension('x', XI, XF, mx)
y = pyclaw.Dimension('y', XI, XF, my)
z = pyclaw.Dimension('z', XI, XF, mz)
domain = pyclaw.Domain([x,y,z])
num_eqn = 4
num_aux = 2 # density, sound speed
state = pyclaw.State(domain,num_eqn,num_aux)
#===========================================================================
# Set up controller and controller parameters
#===========================================================================
claw = pyclaw.Controller()
claw.tfinal = TFINAL
claw.keep_copy = True
claw.solution = peanoclaw.solution.Solution(state,domain) #pyclaw.Solution(state,domain)
claw.solver = peanoSolver #solver
claw.outdir=outdir
claw.num_output_times = NUM_OUTPUT_TIMES
#solver.before_step = _probe
status = claw.run()
return claw |
Python | def pyclaw_acoustics3D(iplot=False,htmlplot=False,use_petsc=False,outdir='./_output',solver_type='classic',**kwargs):
    """
    Example python script for solving the 3d acoustics equations.
    """
    global NUM_OUTPUT_TIMES
    global TFINAL

    if use_petsc:
        import clawpack.petclaw as pyclaw
    else:
        from clawpack import pyclaw

    if solver_type=='classic':
        solver = pyclaw.ClawSolver3D()
    else:
        raise Exception('Unrecognized solver_type.')

    from clawpack import riemann
    solver.rp = riemann.rp3_vc_acoustics
    solver.num_waves = 2
    solver.limiters = pyclaw.limiters.tvd.MC

    solver.bc_lower[0] = pyclaw.BC.extrap
    solver.bc_upper[0] = pyclaw.BC.extrap
    solver.bc_lower[1] = pyclaw.BC.extrap
    solver.bc_upper[1] = pyclaw.BC.extrap
    solver.bc_lower[2] = pyclaw.BC.extrap
    solver.bc_upper[2] = pyclaw.BC.extrap

    solver.aux_bc_lower[0] = pyclaw.BC.extrap
    solver.aux_bc_upper[0] = pyclaw.BC.extrap
    solver.aux_bc_lower[1] = pyclaw.BC.extrap
    solver.aux_bc_upper[1] = pyclaw.BC.extrap
    solver.aux_bc_lower[2] = pyclaw.BC.extrap
    solver.aux_bc_upper[2] = pyclaw.BC.extrap

    solver.dimensional_split = True

    #===========================================================================
    # Initialize domain and state, then initialize the solution associated to the
    # state and finally initialize the aux array
    #===========================================================================
    # Initialize domain
    mx = SUBDIVISION_FACTOR * CELLS
    my = SUBDIVISION_FACTOR * CELLS
    mz = SUBDIVISION_FACTOR * CELLS
    x = pyclaw.Dimension('x', XI, XF, mx)
    y = pyclaw.Dimension('y', XI, XF, my)
    z = pyclaw.Dimension('z', XI, XF, mz)
    domain = pyclaw.Domain([x,y,z])

    num_eqn = 4
    num_aux = 2  # density, sound speed
    state = pyclaw.State(domain,num_eqn,num_aux)

    init(state)

    claw = pyclaw.Controller()
    claw.keep_copy = True
    claw.solution = pyclaw.Solution(state,domain)
    claw.solver = solver
    claw.outdir = outdir
    claw.num_output_times = NUM_OUTPUT_TIMES

    # Solve
    claw.tfinal = TFINAL
    status = claw.run()

    return claw
"""
Example python script for solving the 3d acoustics equations.
"""
global NUM_OUTPUT_TIMES
global TFINAL
if use_petsc:
import clawpack.petclaw as pyclaw
else:
from clawpack import pyclaw
if solver_type=='classic':
solver=pyclaw.ClawSolver3D()
else:
raise Exception('Unrecognized solver_type.')
from clawpack import riemann
solver.rp = riemann.rp3_vc_acoustics
solver.num_waves = 2
solver.limiters = pyclaw.limiters.tvd.MC
solver.bc_lower[0]=pyclaw.BC.extrap
solver.bc_upper[0]=pyclaw.BC.extrap
solver.bc_lower[1]=pyclaw.BC.extrap
solver.bc_upper[1]=pyclaw.BC.extrap
solver.bc_lower[2]=pyclaw.BC.extrap
solver.bc_upper[2]=pyclaw.BC.extrap
solver.aux_bc_lower[0]=pyclaw.BC.extrap
solver.aux_bc_upper[0]=pyclaw.BC.extrap
solver.aux_bc_lower[1]=pyclaw.BC.extrap
solver.aux_bc_upper[1]=pyclaw.BC.extrap
solver.aux_bc_lower[2]=pyclaw.BC.extrap
solver.aux_bc_upper[2]=pyclaw.BC.extrap
solver.dimensional_split=True
solver.limiters = pyclaw.limiters.tvd.MC
#===========================================================================
# Initialize domain and state, then initialize the solution associated to the
# state and finally initialize aux array
#===========================================================================
# Initialize domain
mx = SUBDIVISION_FACTOR * CELLS
my = SUBDIVISION_FACTOR * CELLS
mz = SUBDIVISION_FACTOR * CELLS
x = pyclaw.Dimension('x', XI, XF, mx)
y = pyclaw.Dimension('y', XI, XF, my)
z = pyclaw.Dimension('z', XI, XF, mz)
domain = pyclaw.Domain([x,y,z])
num_eqn = 4
num_aux = 2 # density, sound speed
state = pyclaw.State(domain,num_eqn,num_aux)
init(state)
claw = pyclaw.Controller()
claw.keep_copy = True
claw.solution = pyclaw.Solution(state,domain)
claw.solver = solver
claw.outdir=outdir
claw.num_output_times = NUM_OUTPUT_TIMES
# Solve
claw.tfinal = TFINAL
status = claw.run()
return claw |
Python | def kpp(use_petsc=False,iplot=False,htmlplot=False,outdir='./_output',solver_type='classic'):
    """
    Example python script for solving the 2d KPP equation.
    """
    if use_petsc:
        import clawpack.petclaw as pyclaw
    else:
        from clawpack import pyclaw

    if solver_type=='sharpclaw':
        solver = pyclaw.SharpClawSolver2D()
    else:
        solver = pyclaw.ClawSolver2D()

    from clawpack import riemann
    solver.rp = riemann.rp2_kpp
    solver.num_waves = 1  # scalar problem: a single wave

    solver.bc_lower[0] = pyclaw.BC.extrap
    solver.bc_upper[0] = pyclaw.BC.extrap
    solver.bc_lower[1] = pyclaw.BC.extrap
    solver.bc_upper[1] = pyclaw.BC.extrap

    # Initialize domain
    mx = 200; my = 200
    x = pyclaw.Dimension('x',-2.0,2.0,mx)
    y = pyclaw.Dimension('y',-2.0,2.0,my)
    domain = pyclaw.Domain([x,y])
    num_eqn = 1
    state = pyclaw.State(domain,num_eqn)

    qinit(state)

    solver.dimensional_split = 1
    solver.cfl_max = 1.0
    solver.cfl_desired = 0.9
    solver.limiters = pyclaw.limiters.tvd.minmod

    claw = pyclaw.Controller()
    claw.tfinal = 1.0
    claw.solution = pyclaw.Solution(state,domain)
    claw.solver = solver
    claw.outdir = outdir
    claw.num_output_times = 10

    # Solve
    status = claw.run()

    if htmlplot: pyclaw.plot.html_plot(outdir=outdir)
    if iplot: pyclaw.plot.interactive_plot(outdir=outdir)
"""
Example python script for solving the 2d KPP equations.
"""
if use_petsc:
import clawpack.petclaw as pyclaw
else:
from clawpack import pyclaw
if solver_type=='sharpclaw':
solver = pyclaw.SharpClawSolver2D()
else:
solver = pyclaw.ClawSolver2D()
from clawpack import riemann
solver.rp = riemann.rp2_kpp
solver.num_waves = 1
solver.bc_lower[0]=pyclaw.BC.extrap
solver.bc_upper[0]=pyclaw.BC.extrap
solver.bc_lower[1]=pyclaw.BC.extrap
solver.bc_upper[1]=pyclaw.BC.extrap
# Initialize domain
mx=200; my=200
x = pyclaw.Dimension('x',-2.0,2.0,mx)
y = pyclaw.Dimension('y',-2.0,2.0,my)
domain = pyclaw.Domain([x,y])
num_eqn = 1
state = pyclaw.State(domain,num_eqn)
qinit(state)
solver.dimensional_split = 1
solver.cfl_max = 1.0
solver.cfl_desired = 0.9
solver.num_waves = 2
solver.limiters = pyclaw.limiters.tvd.minmod
claw = pyclaw.Controller()
claw.tfinal = 1.0
claw.solution = pyclaw.Solution(state,domain)
claw.solver = solver
claw.num_output_times = 10
# Solve
status = claw.run()
if htmlplot: pyclaw.plot.html_plot(outdir=outdir)
if iplot: pyclaw.plot.interactive_plot(outdir=outdir) |
Python | def qinit(state,mx,my):
    """
    Initialize with two Gaussian pulses.
    """
    # The following parameters match the values used in clawpack
    # ==========================================================
    # First Gaussian pulse
    A1    = 1.    # Amplitude
    beta1 = 40.   # Decay factor
    x1    = -0.5  # x-coordinate of the center
    y1    = 0.    # y-coordinate of the center

    # Second Gaussian pulse
    A2    = -1.   # Amplitude
    beta2 = 40.   # Decay factor
    x2    = 0.5   # x-coordinate of the center
    y2    = 0.    # y-coordinate of the center

    # Compute the coordinates of all grid cell centers and store them
    state.grid.compute_p_centers(recompute=True)
    xp = state.grid.p_centers[0]
    yp = state.grid.p_centers[1]

    state.q[0,:,:] = A1*np.exp(-beta1*(np.square(xp-x1) + np.square(yp-y1)))\
                   + A2*np.exp(-beta2*(np.square(xp-x2) + np.square(yp-y2)))
"""
Initialize with two Gaussian pulses.
"""
# The following parameters match the vaules used in clawpack
# ==========================================================
# First gaussian pulse
A1 = 1. # Amplitude
beta1 = 40. # Decay factor
x1 = -0.5 # x-coordinate of the centers
y1 = 0. # y-coordinate of the centers
# Second gaussian pulse
A2 = -1. # Amplitude
beta2 = 40. # Decay factor
x2 = 0.5 # x-coordinate of the centers
y2 = 0. # y-coordinate of the centers
# Compute location of all grid cell centers coordinates and store them
state.grid.compute_p_centers(recompute=True)
xp = state.grid.p_centers[0]
yp = state.grid.p_centers[1]
state.q[0,:,:] = A1*np.exp(-beta1*(np.square(xp-x1) + np.square(yp-y1)))\
+ A2*np.exp(-beta2*(np.square(xp-x2) + np.square(yp-y2))) |
Python | def velocities_upper(state,dim,t,auxbc,num_ghost):
    """
    Set the velocities for the ghost cells outside the outer radius of the annulus.
    """
    from mapc2p import mapc2p

    grid = state.grid
    mx = grid.num_cells[0]
    my = grid.num_cells[1]
    dxc = grid.delta[0]
    dyc = grid.delta[1]

    if dim == grid.dimensions[0]:
        xc1d = grid.lower[0]+dxc*(np.arange(mx+num_ghost,mx+2*num_ghost+1)-num_ghost)
        yc1d = grid.lower[1]+dyc*(np.arange(my+2*num_ghost+1)-num_ghost)
        yc,xc = np.meshgrid(yc1d,xc1d)
        xp,yp = mapc2p(xc,yc)
        auxbc[:,-num_ghost:,:] = velocities_capa(xp,yp,dxc,dyc)
    else:
        raise Exception('Custom BC for this boundary is not appropriate!')
"""
Set the velocities for the ghost cells outside the outer radius of the annulus.
"""
from mapc2p import mapc2p
grid=state.grid
mx = grid.num_cells[0]
my = grid.num_cells[1]
dxc = grid.delta[0]
dyc = grid.delta[1]
if dim == grid.dimensions[0]:
xc1d = grid.lower[0]+dxc*(np.arange(mx+num_ghost,mx+2*num_ghost+1)-num_ghost)
yc1d = grid.lower[1]+dyc*(np.arange(my+2*num_ghost+1)-num_ghost)
yc,xc = np.meshgrid(yc1d,xc1d)
xp,yp = mapc2p(xc,yc)
auxbc[:,-num_ghost:,:] = velocities_capa(xp,yp,dxc,dyc)
else:
raise Exception('Custum BC for this boundary is not appropriate!') |
Python | def velocities_lower(state,dim,t,auxbc,num_ghost):
    """
    Set the velocities for the ghost cells outside the inner radius of the annulus.
    """
    from mapc2p import mapc2p

    grid = state.grid
    my = grid.num_cells[1]
    dxc = grid.delta[0]
    dyc = grid.delta[1]

    if dim == grid.dimensions[0]:
        xc1d = grid.lower[0]+dxc*(np.arange(num_ghost+1)-num_ghost)
        yc1d = grid.lower[1]+dyc*(np.arange(my+2*num_ghost+1)-num_ghost)
        yc,xc = np.meshgrid(yc1d,xc1d)
        xp,yp = mapc2p(xc,yc)
        auxbc[:,0:num_ghost,:] = velocities_capa(xp,yp,dxc,dyc)
    else:
        raise Exception('Custom BC for this boundary is not appropriate!')
"""
Set the velocities for the ghost cells outside the inner radius of the annulus.
"""
from mapc2p import mapc2p
grid=state.grid
my = grid.num_cells[1]
dxc = grid.delta[0]
dyc = grid.delta[1]
if dim == grid.dimensions[0]:
xc1d = grid.lower[0]+dxc*(np.arange(num_ghost+1)-num_ghost)
yc1d = grid.lower[1]+dyc*(np.arange(my+2*num_ghost+1)-num_ghost)
yc,xc = np.meshgrid(yc1d,xc1d)
xp,yp = mapc2p(xc,yc)
auxbc[:,0:num_ghost,:] = velocities_capa(xp,yp,dxc,dyc)
else:
raise Exception('Custum BC for this boundary is not appropriate!') |
Python | def stream(xp,yp):
    """
    Calculates the stream function in physical space.
    Clockwise rotation. One full rotation corresponds to 1 (second).
    """
    streamValue = np.pi*(xp**2 + yp**2)
    return streamValue
"""
Calculates the stream function in physical space.
Clockwise rotation. One full rotation corresponds to 1 (second).
"""
streamValue = np.pi*(xp**2 + yp**2)
return streamValue |
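# velocities_capa, called by velocities_upper and velocities_lower above, is
# not part of this excerpt. A hypothetical sketch consistent with the stream
# function psi = pi*(x**2 + y**2): the rotation velocities follow from
# u = dpsi/dy and v = -dpsi/dx, giving one clockwise revolution per time unit.
def velocities_capa(xp, yp, dx, dy):
    """Sketch only: analytic rotation velocities plus a placeholder capacity."""
    import numpy as np
    aux = np.empty((3,) + np.shape(xp))
    aux[0,...] = 2.*np.pi*yp    # u = dpsi/dy
    aux[1,...] = -2.*np.pi*xp   # v = -dpsi/dx
    aux[2,...] = 1.             # capacity; the real routine uses the mapped cell area
    return aux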
Python | def verify_classic_shockbubble(test_state):
    """ Verifies the 2d Euler shockbubble against a previously verified classic run """
    import os
    from clawpack.pyclaw.util import check_diff
    import numpy as np

    test_q = test_state.get_q_global()

    if test_q is not None:
        thisdir = os.path.dirname(__file__)
        expected_density = np.loadtxt(os.path.join(thisdir,'verify_shockbubble_classic.txt'))
        test_density = test_q[0,:,:]
        test_err = np.linalg.norm(expected_density-test_density)
        expected_err = 0
        return check_diff(expected_err, test_err, abstol=1e-12)
Python | def qbc_lower_y(state,dim,t,qbc,num_ghost):
    """
    Impose periodic boundary condition to q at the bottom boundary for the
    sphere. This function does not work in parallel.
    """
    for j in range(num_ghost):
        qbc1D = np.copy(qbc[:,:,2*num_ghost-1-j])
        qbc[:,:,j] = qbc1D[:,::-1]
"""
Impose periodic boundary condition to q at the bottom boundary for the
sphere. This function does not work in parallel.
"""
for j in range(num_ghost):
qbc1D = np.copy(qbc[:,:,2*num_ghost-1-j])
qbc[:,:,j] = qbc1D[:,::-1] |
Python | def qbc_upper_y(state,dim,t,qbc,num_ghost):
    """
    Impose periodic boundary condition to q at the top boundary for the sphere.
    This function does not work in parallel.
    """
    my = state.grid.num_cells[1]
    for j in range(num_ghost):
        qbc1D = np.copy(qbc[:,:,my+num_ghost-1-j])
        qbc[:,:,my+num_ghost+j] = qbc1D[:,::-1]
"""
Impose periodic boundary condition to q at the top boundary for the sphere.
This function does not work in parallel.
"""
my = state.grid.num_cells[1]
for j in range(num_ghost):
qbc1D = np.copy(qbc[:,:,my+num_ghost-1-j])
qbc[:,:,my+num_ghost+j] = qbc1D[:,::-1] |
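# Hooking the custom q boundary routines above into a solver follows the usual
# pattern in these examples (a sketch, assuming a 2d solver instance named
# `solver` and the pyclaw import used throughout):
solver.bc_lower[1] = pyclaw.BC.custom
solver.bc_upper[1] = pyclaw.BC.custom
solver.user_bc_lower = qbc_lower_y
solver.user_bc_upper = qbc_upper_y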
Python | def auxbc_lower_y(state,dim,t,auxbc,num_ghost):
    """
    Impose periodic boundary condition to aux at the bottom boundary for the
    sphere.
    """
    grid = state.grid

    # Get parameters and variables that have to be passed to the fortran setaux
    # routine.
    mx, my = grid.num_cells[0], grid.num_cells[1]
    xlower, ylower = grid.lower[0], grid.lower[1]
    dx, dy = grid.delta[0], grid.delta[1]

    # Impose BC
    auxtemp = auxbc.copy()
    auxtemp = problem.setaux(mx,my,num_ghost,mx,my,xlower,ylower,dx,dy,auxtemp,Rsphere)
    auxbc[:,:,:num_ghost] = auxtemp[:,:,:num_ghost]
"""
Impose periodic boundary condition to aux at the bottom boundary for the
sphere.
"""
grid=state.grid
# Get parameters and variables that have to be passed to the fortran src2
# routine.
mx, my = grid.num_cells[0], grid.num_cells[1]
xlower, ylower = grid.lower[0], grid.lower[1]
dx, dy = grid.delta[0],grid.delta[1]
# Impose BC
auxtemp = auxbc.copy()
auxtemp = problem.setaux(mx,my,num_ghost,mx,my,xlower,ylower,dx,dy,auxtemp,Rsphere)
auxbc[:,:,:num_ghost] = auxtemp[:,:,:num_ghost] |
Python | def auxbc_upper_y(state,dim,t,auxbc,num_ghost):
    """
    Impose periodic boundary condition to aux at the top boundary for the
    sphere.
    """
    grid = state.grid

    # Get parameters and variables that have to be passed to the fortran setaux
    # routine.
    mx, my = grid.num_cells[0], grid.num_cells[1]
    xlower, ylower = grid.lower[0], grid.lower[1]
    dx, dy = grid.delta[0], grid.delta[1]

    # Impose BC
    auxtemp = auxbc.copy()
    auxtemp = problem.setaux(mx,my,num_ghost,mx,my,xlower,ylower,dx,dy,auxtemp,Rsphere)
    auxbc[:,:,-num_ghost:] = auxtemp[:,:,-num_ghost:]
"""
Impose periodic boundary condition to aux at the top boundary for the
sphere.
"""
grid=state.grid
# Get parameters and variables that have to be passed to the fortran src2
# routine.
mx, my = grid.num_cells[0], grid.num_cells[1]
xlower, ylower = grid.lower[0], grid.lower[1]
dx, dy = grid.delta[0],grid.delta[1]
# Impose BC
auxtemp = auxbc.copy()
auxtemp = problem.setaux(mx,my,num_ghost,mx,my,xlower,ylower,dx,dy,auxtemp,Rsphere)
auxbc[:,:,-num_ghost:] = auxtemp[:,:,-num_ghost:] |
Python | def burgers(use_petsc=0,kernel_language='Fortran',iplot=0,htmlplot=0,outdir='./_output',solver_type='classic'):
    """
    Example python script for solving the 1d Burgers equation.
    """
    import numpy as np
    from clawpack import riemann

    if use_petsc:
        import clawpack.petclaw as pyclaw
    else:
        from clawpack import pyclaw

    #===========================================================================
    # Setup solver and solver parameters
    #===========================================================================
    if solver_type=='sharpclaw':
        solver = pyclaw.SharpClawSolver1D()
    else:
        solver = pyclaw.ClawSolver1D()

    solver.limiters = pyclaw.limiters.tvd.vanleer
    solver.kernel_language = kernel_language
    if kernel_language=='Python':
        solver.rp = riemann.rp_burgers_1d
    elif kernel_language=='Fortran':
        solver.rp = riemann.rp1_burgers

    solver.num_waves = 1
    solver.bc_lower[0] = pyclaw.BC.periodic
    solver.bc_upper[0] = pyclaw.BC.periodic

    #===========================================================================
    # Initialize domain and then initialize the solution associated to the domain
    #===========================================================================
    x = pyclaw.Dimension('x',0.0,1.0,500)
    domain = pyclaw.Domain(x)
    num_eqn = 1
    state = pyclaw.State(domain,num_eqn)

    grid = state.grid
    xc = grid.x.centers
    state.q[0,:] = np.sin(np.pi*2*xc) + 0.50
    state.problem_data['efix'] = True

    #===========================================================================
    # Setup controller and controller parameters. Then solve the problem
    #===========================================================================
    claw = pyclaw.Controller()
    claw.tfinal = 0.5
    claw.solution = pyclaw.Solution(state,domain)
    claw.solver = solver
    claw.outdir = outdir

    status = claw.run()

    if htmlplot: pyclaw.plot.html_plot(outdir=outdir)
    if iplot: pyclaw.plot.interactive_plot(outdir=outdir)
"""
Example python script for solving the 1d Burgers equation.
"""
import numpy as np
from clawpack import riemann
if use_petsc:
import clawpack.petclaw as pyclaw
else:
from clawpack import pyclaw
#===========================================================================
# Setup solver and solver parameters
#===========================================================================
if solver_type=='sharpclaw':
solver = pyclaw.SharpClawSolver1D()
else:
solver = pyclaw.ClawSolver1D()
solver.limiters = pyclaw.limiters.tvd.vanleer
solver.kernel_language = kernel_language
if kernel_language=='Python':
solver.rp = riemann.rp_burgers_1d
elif kernel_language=='Fortran':
solver.rp = riemann.rp1_burgers
solver.num_waves = 1
solver.bc_lower[0] = pyclaw.BC.periodic
solver.bc_upper[0] = pyclaw.BC.periodic
#===========================================================================
# Initialize domain and then initialize the solution associated to the domain
#===========================================================================
x = pyclaw.Dimension('x',0.0,1.0,500)
domain = pyclaw.Domain(x)
num_eqn = 1
state = pyclaw.State(domain,num_eqn)
grid = state.grid
xc=grid.x.centers
state.q[0,:] = np.sin(np.pi*2*xc) + 0.50
state.problem_data['efix']=True
#===========================================================================
# Setup controller and controller parameters. Then solve the problem
#===========================================================================
claw = pyclaw.Controller()
claw.tfinal =0.5
claw.solution = pyclaw.Solution(state,domain)
claw.solver = solver
claw.outdir = outdir
status = claw.run()
if htmlplot: pyclaw.plot.html_plot(outdir=outdir)
if iplot: pyclaw.plot.interactive_plot(outdir=outdir) |
Python | def run_app_from_main(application):
    r"""
    Runs an application from apps/, automatically parsing command line keyword
    arguments (key=value) as parameters to the application, with positional
    arguments being passed to PETSc (if it is enabled).

    Perhaps we should take the PETSc approach of having a database of PyClaw
    options that can be queried for options on specific objects within the
    PyClaw runtime instead of front-loading everything through the application
    main...
    """
    # Arguments to PyClaw should be keyword based; positional arguments
    # will be passed to PETSc
    petsc_args, app_kwargs = _info_from_argv(sys.argv)

    if 'use_petsc' in app_kwargs and app_kwargs['use_petsc']:
        import petsc4py
        petsc_args = [arg.replace('--','-') for arg in sys.argv[1:] if '=' not in arg]
        petsc4py.init(petsc_args)

    output = application(**app_kwargs)

    return output
r"""
Runs an application from apps/, automatically parsing command line keyword
arguments (key=value) as parameters to the application, with positional
arguments being passed to PETSc (if it is enabled).
Perhaps we should take the PETSc approach of having a database of PyClaw
options that can be queried for options on specific objects within the
PyClaw runtime instead of front-loading everything through the application
main...
"""
# Arguments to the PyClaw should be keyword based, positional arguments
# will be passed to PETSc
petsc_args, app_kwargs = _info_from_argv(sys.argv)
if 'use_petsc' in app_kwargs and app_kwargs['use_petsc']:
import petsc4py
petsc_args = [arg.replace('--','-') for arg in sys.argv[1:] if '=' not in arg]
petsc4py.init(petsc_args)
output=application(**app_kwargs)
return output |
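# The entry-point pattern these example scripts use with run_app_from_main
# (a sketch; `burgers` from the record above stands in for any app function,
# and keyword arguments such as use_petsc=1 come from the command line):
if __name__ == "__main__":
    from clawpack.pyclaw.util import run_app_from_main
    output = run_app_from_main(burgers)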
Python | def gen_variants(application, verifier, kernel_languages=('Fortran',), **kwargs):
    r"""
    Generator of runnable variants of a test application given a verifier

    Given an application, a script for verifying its output, and a
    list of kernel languages to try, generates all possible variants of the
    application to try by taking a product of the available kernel_languages and
    (petclaw/pyclaw). For many applications, this will generate 4 variants:
    the product of the two main kernel languages ('Fortran' and 'Python')
    against the two parallel modes (petclaw and pyclaw).

    For more information on how the verifier function should be implemented,
    see util.test_app for a description, and util.check_diff for an example.

    All unrecognized keyword arguments are passed through to the application.
    """
    arg_dicts = build_variant_arg_dicts(kernel_languages)

    for test_kwargs in arg_dicts:
        test_kwargs.update(kwargs)
        yield (test_app, application, verifier, test_kwargs)
    return
r"""
Generator of runnable variants of a test application given a verifier
Given an application, a script for verifying its output, and a
list of kernel languages to try, generates all possible variants of the
application to try by taking a product of the available kernel_languages and
(petclaw/pyclaw). For many applications, this will generate 4 variants:
the product of the two main kernel languages ('Fortran' and 'Python'), against
the the two parallel modes (petclaw and pyclaw).
For more information on how the verifier function should be implemented,
see util.test_app for a description, and util.check_diff for an example.
All unrecognized keyword arguments are passed through to the application.
"""
arg_dicts = build_variant_arg_dicts(kernel_languages)
for test_kwargs in arg_dicts:
test_kwargs.update(kwargs)
yield (test_app, application, verifier, test_kwargs)
return |
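# gen_variants is meant for nose-style test collection: each yielded tuple is
# unpacked into a call test_app(application, verifier, kwargs). A sketch of a
# generator test built on the records above (the trivial verifier is a stand-in
# for a real comparison such as verify_classic_shockbubble):
def test_burgers_variants():
    verifier = lambda result: None  # always passes; real verifiers return a diff tuple on failure
    for test in gen_variants(burgers, verifier, kernel_languages=('Fortran',), tfinal=0.5):
        yield test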
Python | def check_diff(expected, test, **kwargs):
    r"""
    Checks the difference between expected and test values, return None if ok
    This function expects either the keyword argument 'abstol' or 'reltol'.
    """
    if 'abstol' in kwargs:
        if abs(expected - test) < kwargs['abstol']:
            return None
        else:
            return (expected, test, 'abstol : %s' % kwargs['abstol'])
    elif 'reltol' in kwargs:
        if abs((expected - test)/expected) < kwargs['reltol']:
            return None
        else:
            return (expected, test, 'reltol : %s' % kwargs['reltol'])
    else:
        raise Exception('Incorrect use of check_diff verifier, specify tol!')
r"""
Checks the difference between expected and test values, return None if ok
This function expects either the keyword argument 'abstol' or 'reltol'.
"""
if 'abstol' in kwargs:
if abs(expected - test) < kwargs['abstol']: return None
else: return (expected, test, 'abstol : %s' % kwargs['abstol'])
elif 'reltol' in kwargs:
if abs((expected - test)/expected) < kwargs['reltol']: return None
else: return (expected, test, 'reltol : %s' % kwargs['reltol'])
else:
raise Exception('Incorrect use of check_diff verifier, specify tol!') |
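# check_diff in action: a verifier built on it returns None on success and an
# (expected, test, tolerance-description) triple on failure.
assert check_diff(1.0, 1.0 + 1e-13, abstol=1e-12) is None
assert check_diff(100.0, 101.0, reltol=0.05) is None
assert check_diff(1.0, 2.0, abstol=1e-12) == (1.0, 2.0, 'abstol : 1e-12')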
Python | def compile_library(source_list,module_name,interface_functions=[],
                    local_path='./',library_path='./',f2py_flags='',
                    FC=None,FFLAGS=None,recompile=False,clean=False):
    r"""
    Compiles and wraps fortran source into a callable module in python.

    This function uses f2py to create an interface from python to the fortran
    sources in source_list. The source_list can either be a list of names
    of source files, in which case compile_library will search for the file in
    local_path and then in library_path. If a path is given, the file will be
    checked to see if it exists; if not, the function will look for the file in
    the above resolution order. If any source file is not found, an IOError is
    raised.

    The list interface_functions allows the user to specify which fortran
    functions are actually available to python. The interface functions are
    assumed to be in the file with their name, i.e. claw1 is located in
    'claw1.f95' or 'claw1.f'.

    The interface from fortran may be different than the original function
    call in fortran, so the user should make sure to check the automatically
    created doc string for the fortran module for proper use.

    Source files will not be recompiled if they have not been changed.

    One set of options of note is for enabling OpenMP: it requires the usual
    fortran flags, but the OpenMP library must also be compiled in, which is
    done with the flag -lgomp. The call to compile_library would then be:

        compile_library(src,module_name,f2py_flags='-lgomp',FFLAGS='-fopenmp')

    For complete optimization use:

        FFLAGS='-O3 -fopenmp -funroll-loops -finline-functions -fdefault-real-8'

    :Input:
     - *source_list* - (list of strings) List of source files; if these are
       just names of the source files, i.e. 'bc1.f', then they will be searched
       for in the default source resolution order; if an explicit path is
       given, i.e. './bc1.f', then the function will use that source if it can
       find it.
     - *module_name* - (string) Name of the resulting module
     - *interface_functions* - (list of strings) List of function names to
       provide access to; if empty, all functions are accessible to python.
       Defaults to [].
     - *local_path* - (string) The base path for source resolution, defaults
       to './'.
     - *library_path* - (string) The library path for source resolution,
       defaults to './'.
     - *f2py_flags* - (string) f2py flags to be passed
     - *FC* - (string) Override the environment variable FC and use it to
       compile; note that this does not replace the compiler that f2py uses,
       only the object file compilation (functions that do not have
       interfaces)
     - *FFLAGS* - (string) Override the environment variable FFLAGS and pass
       them to the fortran compiler
     - *recompile* - (bool) Force recompilation of the library, defaults to
       False
     - *clean* - (bool) Force a clean build of all source files
    """
    # Setup logger
    logger = logging.getLogger('f2py')
    temp_file = tempfile.TemporaryFile()
    logger.info('Compiling %s' % module_name)

    # Force recompile if the clean flag is set
    if clean:
        recompile = True

    # Expand local_path and library_path
    local_path = os.path.expandvars(local_path)
    local_path = os.path.expanduser(local_path)
    library_path = os.path.expandvars(library_path)
    library_path = os.path.expanduser(library_path)

    # Fetch environment variables we need for compilation
    if FC is None:
        if os.environ.has_key('FC'):
            FC = os.environ['FC']
        else:
            FC = 'gfortran'

    if FFLAGS is None:
        if os.environ.has_key('FFLAGS'):
            FFLAGS = os.environ['FFLAGS']
        else:
            FFLAGS = ''

    # Create the list of paths to sources
    path_list = []
    for source in source_list:
        # Check to see if the source looks like a path, i.e. it contains the
        # os.path.sep character
        if source.find(os.path.sep) >= 0:
            source = os.path.expandvars(source)
            source = os.path.expanduser(source)
            # This is a path, check to see if it's valid
            if os.path.exists(source):
                path_list.append(source)
                continue
            # Otherwise, take the last part of the path and try searching for
            # it in the resolution order
            source = os.path.split(source)[1]

        # Search for the source file in local_path and then library_path
        if os.path.exists(os.path.join(local_path,source)):
            path_list.append(os.path.join(local_path,source))
            continue
        elif os.path.exists(os.path.join(library_path,source)):
            path_list.append(os.path.join(library_path,source))
            continue
        else:
            raise IOError('Could not find source file %s' % source)

    # Compile each of the source files if the object files are not present or
    # if the modification date of the source file is newer than the object
    # file's creation date
    object_list = []
    src_list = []
    for path in path_list:
        object_path = os.path.join(os.path.split(path)[0],
            '.'.join((os.path.split(path)[1].split('.')[:-1][0],'o')))

        # Check to see if this path contains one of the interface functions
        if os.path.split(path)[1].split('.')[:-1][0] in interface_functions:
            src_list.append(path)
            continue
        # If there are no interface functions specified, then all source files
        # must be included in the f2py call
        elif len(interface_functions) == 0:
            src_list.append(path)
            continue

        if os.path.exists(object_path) and not clean:
            # Skip compilation if the object file is newer than the source file
            if os.path.getmtime(object_path) > os.path.getmtime(path):
                object_list.append(object_path)
                continue
        # Compile the source file into the object file
        command = '%s %s -c %s -o %s' % (FC,FFLAGS,path,object_path)
        logger.debug(command)
        subprocess.call(command,shell=True,stdout=temp_file)
        object_list.append(object_path)

    # Check to see if recompile is needed
    if not recompile:
        module_path = os.path.join('.','.'.join((module_name,'so')))
        if os.path.exists(module_path):
            for src in src_list:
                if os.path.getmtime(module_path) < os.path.getmtime(src):
                    recompile = True
                    break
            for obj in object_list:
                if os.path.getmtime(module_path) < os.path.getmtime(obj):
                    recompile = True
                    break
        else:
            recompile = True

    if recompile:
        # Wrap the object files into a python module
        f2py_command = "f2py -c"
        # Add standard compiler flags
        f2py_command = ' '.join((f2py_command,f2py_flags))
        f2py_command = ' '.join((f2py_command,"--f90flags='%s'" % FFLAGS))
        # Add module name
        f2py_command = ' '.join((f2py_command,'-m %s' % module_name))
        # Add source files
        f2py_command = ' '.join((f2py_command,' '.join(src_list)))
        # Add object files
        f2py_command = ' '.join((f2py_command,' '.join(object_list)))
        # Add interface functions
        if len(interface_functions) > 0:
            f2py_command = ' '.join( (f2py_command,'only:') )
            for interface in interface_functions:
                f2py_command = ' '.join( (f2py_command,interface) )
            f2py_command = ''.join( (f2py_command,' :') )
        logger.debug(f2py_command)

        status = subprocess.call(f2py_command,shell=True,stdout=temp_file)
        if status == 0:
            logger.info("Module %s compiled" % module_name)
        else:
            logger.info("Module %s failed to compile with code %s" % (module_name,status))
            sys.exit(13)
    else:
        logger.info("Module %s is up to date." % module_name)

    temp_file.seek(0)
    logger.debug(temp_file.read())
    temp_file.close()
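# A sketch of a compile_library call enabling OpenMP, per the docstring above
# (module and file names are illustrative, not taken from a real build):
sources = ['limiter.f90', 'philim.f90', 'step1.f90', 'claw1.f']
compile_library(sources, 'classic1',
                interface_functions=['claw1'],
                library_path='./lib/1d',
                f2py_flags='-lgomp',
                FFLAGS='-fopenmp')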
Python | def construct_function_handle(path,function_name=None):
    r"""
    Constructs a function handle from the file at path.

    This function will attempt to construct a function handle from the python
    file at path.

    :Input:
     - *path* - (string) Path to the file containing the function
     - *function_name* - (string) Name of the function defined in the file
       that the handle will point to. Defaults to the same name as the file
       without the extension.

    :Output:
     - (func) Function handle to the constructed function, None if this has
       failed.
    """
    # Determine the resulting function_name
    if function_name is None:
        function_name = path.split('/')[-1].split('.')[0]

    full_path = os.path.abspath(path)
    if os.path.exists(full_path):
        suffix = path.split('.')[-1]
        # This is a python file and we just need to read it and map it
        if suffix in ['py']:
            execfile(full_path,globals())
            return eval('%s' % function_name)
        else:
            raise Exception("Invalid file type for function handle.")
    else:
        raise Exception("Invalid file path %s" % path)
r"""
Constructs a function handle from the file at path.
This function will attempt to construct a function handle from the python
file at path.
:Input:
- *path* - (string) Path to the file containing the function
- *function_name* - (string) Name of the function defined in the file
that the handle will point to. Defaults to the same name as the file
without the extension.
:Output:
- (func) Function handle to the constructed function, None if this has
failed.
"""
# Determine the resulting function_name
if function_name is None:
function_name = path.split('/')[-1].split('.')[0]
full_path = os.path.abspath(path)
if os.path.exists(full_path):
suffix = path.split('.')[-1]
# This is a python file and we just need to read it and map it
if suffix in ['py']:
execfile(full_path,globals())
return eval('%s' % function_name)
else:
raise Exception("Invalid file type for function handle.")
else:
raise Exception("Invalid file path %s" % path) |