code
stringlengths 66
870k
| docstring
stringlengths 19
26.7k
| func_name
stringlengths 1
138
| language
stringclasses 1
value | repo
stringlengths 7
68
| path
stringlengths 5
324
| url
stringlengths 46
389
| license
stringclasses 7
values |
---|---|---|---|---|---|---|---|
def from_mol_file(cls, mol_file: Path, *, caption: Optional[str] = None, **kwargs) -> "Molecule":
    """Build a Molecule by parsing an RDKit Mol file from disk.

    Args:
        mol_file: Location of the ``.mol`` file to parse.
        caption: Optional human-readable description for the molecule.
        **kwargs: Extra keyword arguments forwarded to ``from_mol``.

    Returns:
        Molecule: The molecule parsed from the file.

    Raises:
        ValueError: If RDKit is not available or the file cannot be read.
    """
    cls.check_is_available()
    parsed = Chem.MolFromMolFile(str(mol_file))
    if parsed is None:
        raise ValueError(f"Could not read molecule from Mol file: {mol_file}")
    return cls.from_mol(parsed, caption=caption, **kwargs)
|
Creates a Molecule instance from a Mol file.
Args:
mol_file: Path to the Mol file.
caption: Optional descriptive text.
Returns:
Molecule: A new Molecule instance.
Raises:
ValueError: If RDKit is not available or the file cannot be read.
|
from_mol_file
|
python
|
SwanHubX/SwanLab
|
swanlab/data/modules/object3d/molecule.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/data/modules/object3d/molecule.py
|
Apache-2.0
|
def parse(self) -> Tuple[str, MediaBuffer]:
    """Serialize the molecule's PDB text to a buffer for transmission.

    Returns:
        Tuple containing:
            - File name with format: molecule-step{step}-{hash}.pdb
            - MediaBuffer containing the molecule pdb data
    """
    raw = self.pdb_data.encode()
    digest = D.get_hash_by_bytes(raw)[:16]
    stream = MediaBuffer()
    stream.write(raw)
    return f"molecule-step{self.step}-{digest}.pdb", stream
|
Convert Molecule PDB to buffer for transmission.
Returns:
Tuple containing:
- File name with format: molecule-step{step}-{hash}.pdb
- MediaBuffer containing the molecule pdb data
|
parse
|
python
|
SwanHubX/SwanLab
|
swanlab/data/modules/object3d/molecule.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/data/modules/object3d/molecule.py
|
Apache-2.0
|
def from_xyz(cls, points: ndarray, *, caption: Optional[str] = None, **kwargs) -> "PointCloud":
    """Build a PointCloud from bare XYZ coordinates, painted a uniform green.

    Args:
        points: Array of shape (N, 3) holding XYZ coordinates.
        caption: Optional description text.

    Returns:
        PointCloud whose every point carries the default green RGB (0, 255, 0).

    Raises:
        ValueError: If ``points`` is not a 2-D array with three columns.

    Examples:
        >>> pts = np.random.rand(100, 3)
        >>> pc = PointCloud.from_xyz(pts)  # Default green
        >>> pc = PointCloud.from_xyz(pts, caption="Green Points")
    """
    if points.ndim != 2 or points.shape[1] != 3:
        raise ValueError("XYZ array must have shape (N, 3)")
    colored = np.zeros((points.shape[0], 6))
    colored[:, 3:] = (0, 255, 0)  # uniform default green for every point
    colored[:, :3] = points
    return cls(colored, caption=caption, **kwargs)
|
Create PointCloud from XYZ coordinates.
Args:
points: numpy array with shape (N, 3) containing XYZ coordinates
caption: Optional description text
Returns:
PointCloud object with default green color
Examples:
>>> points = np.random.rand(100, 3)
>>> pc = PointCloud.from_xyz(points) # Default green
>>> pc = PointCloud.from_xyz(points, caption="Green Points")
|
from_xyz
|
python
|
SwanHubX/SwanLab
|
swanlab/data/modules/object3d/point_cloud.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/data/modules/object3d/point_cloud.py
|
Apache-2.0
|
def from_xyzc(cls, points: ndarray, *, caption: Optional[str] = None, **kwargs) -> "PointCloud":
    """Build a PointCloud from XYZC data (XYZ coordinates plus a category index).

    Args:
        points: Array of shape (N, 4) whose last column holds integer
            category labels used to pick a color from ``light_colors``.
        caption: Optional description text.

    Returns:
        PointCloud whose point colors are looked up from the category palette.

    Raises:
        ValueError: If ``points`` is not a 2-D array with four columns.

    Examples:
        >>> pts = np.zeros((100, 4))
        >>> pts[:, :3] = coordinates  # XYZ coordinates
        >>> pts[:, 3] = categories    # Category labels (0,1,2...)
        >>> pc = PointCloud.from_xyzc(pts)
        >>> pc = PointCloud.from_xyzc(pts, caption="Segmented Points")
    """
    if points.ndim != 2 or points.shape[1] != 4:
        raise ValueError("XYZC array must have shape (N, 4)")
    labels = points[:, 3].astype(int)
    # One RGB row per predefined light color; indexed by category label.
    palette = np.array([hex_to_rgb(color) for color in light_colors])
    colored = np.zeros((points.shape[0], 6))
    colored[:, :3] = points[:, :3]
    colored[:, 3:] = palette[labels]
    return cls(colored, caption=caption, **kwargs)
|
Create PointCloud from XYZC format (XYZ coordinates + category).
Args:
points: numpy array with shape (N, 4) containing XYZC values
where C is category index (integer)
caption: Optional description text
Returns:
PointCloud object with colors mapped from categories
Examples:
>>> points = np.zeros((100, 4))
>>> points[:, :3] = coordinates # XYZ coordinates
>>> points[:, 3] = categories # Category labels (0,1,2...)
>>> pc = PointCloud.from_xyzc(points)
>>> pc = PointCloud.from_xyzc(points, caption="Segmented Points")
|
from_xyzc
|
python
|
SwanHubX/SwanLab
|
swanlab/data/modules/object3d/point_cloud.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/data/modules/object3d/point_cloud.py
|
Apache-2.0
|
def from_xyzrgb(cls, points: ndarray, *, caption: Optional[str] = None, **kwargs) -> "PointCloud":
    """Build a PointCloud from XYZRGB rows.

    Args:
        points: Array of shape (N, 6): XYZ coordinates followed by RGB
            values in the 0-255 range.
        caption: Optional description text.

    Returns:
        PointCloud wrapping ``points`` unchanged.

    Raises:
        ValueError: If ``points`` is not a 2-D array with six columns.

    Examples:
        >>> pts = np.zeros((100, 6))
        >>> pts[:, :3] = coordinates  # XYZ coordinates
        >>> pts[:, 3:] = colors       # RGB values (0-255)
        >>> pc = PointCloud.from_xyzrgb(pts)
        >>> pc = PointCloud.from_xyzrgb(pts, step=1, caption="Colored Points")
    """
    shape_ok = points.ndim == 2 and points.shape[1] == 6
    if not shape_ok:
        raise ValueError("XYZRGB array must have shape (N, 6)")
    return cls(points, caption=caption, **kwargs)
|
Create PointCloud from XYZRGB format.
Args:
points: numpy array with shape (N, 6) containing XYZRGB values
caption: Optional description text
Returns:
PointCloud object
Examples:
>>> points = np.zeros((100, 6))
>>> points[:, :3] = coordinates # XYZ coordinates
>>> points[:, 3:] = colors # RGB values (0-255)
>>> pc = PointCloud.from_xyzrgb(points)
>>> pc = PointCloud.from_xyzrgb(points, step=1, caption="Colored Points")
|
from_xyzrgb
|
python
|
SwanHubX/SwanLab
|
swanlab/data/modules/object3d/point_cloud.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/data/modules/object3d/point_cloud.py
|
Apache-2.0
|
def from_swanlab_pts(cls, data: Dict, *, caption: Optional[str] = None, **kwargs) -> "PointCloud":
    """Build a PointCloud from a SwanLab pts dictionary.

    Args:
        data: Dictionary with a required 'points' entry (list of lists or a
            NumPy array in XYZ, XYZC, or XYZRGB layout) and an optional
            'boxes' entry (list of bounding-box dictionaries).
        caption: Optional description text.

    Returns:
        PointCloud built from the points, with any boxes appended.

    Raises:
        ValueError: On malformed ``data``, point shapes, or box entries.
        TypeError: When 'points' or 'boxes' has the wrong container type.
    """
    if not isinstance(data, dict) or "points" not in data:
        raise ValueError("Invalid data format")
    raw_points = data["points"]
    if isinstance(raw_points, list):
        raw_points = np.array(raw_points)
    elif not isinstance(raw_points, ndarray):
        raise TypeError("data['points'] must be a list or a NumPy array")
    if raw_points.ndim != 2:
        raise ValueError("data['points'] must be a 2D array")
    # Dispatch on column count: 3=XYZ, 4=XYZC, 6=XYZRGB.
    factories = {3: cls.from_xyz, 4: cls.from_xyzc, 6: cls.from_xyzrgb}
    try:
        cloud = factories[raw_points.shape[1]](raw_points, caption=caption, **kwargs)
    except KeyError as err:
        raise ValueError("data['points'] must have shape (N, 3), (N, 4), or (N, 6)") from err
    boxes_data = data.get("boxes")
    if boxes_data is None:
        return cloud
    if not isinstance(boxes_data, list):
        raise TypeError("data['boxes'] must be a list")
    for raw_box in boxes_data:
        try:
            box = {
                "color": tuple(raw_box["color"]),
                "corners": [tuple(corner) for corner in raw_box["corners"]],
                "label": raw_box["label"],
            }
            # 'score' is optional; copy it through only when present.
            if "score" in raw_box:
                box["score"] = raw_box["score"]
            cloud.append_box(box)
        except (KeyError, TypeError) as err:
            raise ValueError(f"Invalid box format: {err}") from err
    return cloud
|
Create PointCloud from SwanLab pts data dictionary.
Args:
data: A dictionary containing 'points' (required) and optionally 'boxes'.
'points' can be a list of lists (XYZ, XYZC, or XYZRGB) or a NumPy array.
'boxes' is an optional list of dictionaries, each representing a bounding box.
caption: Optional description text
Returns:
PointCloud object
|
from_swanlab_pts
|
python
|
SwanHubX/SwanLab
|
swanlab/data/modules/object3d/point_cloud.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/data/modules/object3d/point_cloud.py
|
Apache-2.0
|
def from_swanlab_pts_json_file(cls, path: Path, *, caption: Optional[str] = None, **kwargs) -> "PointCloud":
    """Load a PointCloud from a ``.swanlab.pts.json`` file.

    Args:
        path: Path to the .swanlab.pts.json file.
        caption: Optional description text.

    Returns:
        PointCloud parsed from the file contents.

    Raises:
        FileNotFoundError: If ``path`` does not exist.
        ValueError: If ``path`` is not a regular file, contains invalid
            JSON, or cannot be read/parsed into a PointCloud.

    Examples:
        >>> pc = PointCloud.from_swanlab_pts_json_file(
        ...     Path("points.swanlab.pts.json"),
        ...     caption="Loaded Points"
        ... )
    """
    if not path.exists():
        raise FileNotFoundError(f"File not found: {path}")
    if not path.is_file():
        raise ValueError(f"Path is not a file: {path}")
    try:
        # Construction stays inside the try so any downstream parsing
        # problem is reported uniformly as a ValueError naming the file.
        payload = json.loads(path.read_text())
        return cls.from_swanlab_pts(payload, caption=caption, **kwargs)
    except json.JSONDecodeError as e:
        raise ValueError(f"Invalid JSON format in file: {path}") from e
    except Exception as e:
        raise ValueError(f"Error reading file {path}: {str(e)}") from e
|
Create PointCloud from SwanLab pts.json file.
Args:
path: Path to the .swanlab.pts.json file
caption: Optional description text
Returns:
PointCloud object
Examples:
>>> pc = PointCloud.from_swanlab_pts_json_file(
... Path("points.swanlab.pts.json"),
... caption="Loaded Points"
... )
|
from_swanlab_pts_json_file
|
python
|
SwanHubX/SwanLab
|
swanlab/data/modules/object3d/point_cloud.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/data/modules/object3d/point_cloud.py
|
Apache-2.0
|
def parse(self) -> Tuple[str, MediaBuffer]:
    """Serialize the point cloud to a JSON buffer for transmission.

    Returns:
        Tuple containing:
            - File name with format: pointcloud-step{step}-{hash}.swanlab.pts.json
            - MediaBuffer containing the point cloud data
    """
    document = {
        "version": self._VERSION,
        "points": self.points.tolist(),
        "boxes": self.boxes,
    }
    stream = MediaBuffer()
    stream.write(json.dumps(document).encode())
    digest = D.get_hash_by_ndarray(self.points)[:16]
    return f"pointcloud-step{self.step}-{digest}.swanlab.pts.json", stream
|
Convert point cloud to buffer for transmission.
Returns:
Tuple containing:
- File name with format: pointcloud-step{step}-{hash}.swanlab.pts.json
- MediaBuffer containing the point cloud data
|
parse
|
python
|
SwanHubX/SwanLab
|
swanlab/data/modules/object3d/point_cloud.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/data/modules/object3d/point_cloud.py
|
Apache-2.0
|
def get(self, name: str, default=None):
    """
    Return the value of the configuration item ``name``.

    Unlike attribute access, a missing item does not raise: ``default``
    (None unless given) is returned instead.
    """
    return self.__config.get(name, default)
|
Get the value of a configuration item. If the item does not exist, raise AttributeError.
|
get
|
python
|
SwanHubX/SwanLab
|
swanlab/data/run/config.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/data/run/config.py
|
Apache-2.0
|
def __setitem__(self, name: str, value: Any) -> None:
    """
    Create or overwrite the configuration item ``name`` and persist the
    configuration.

    NOTE(review): the original docstring claimed private attributes cannot
    be set, but no such check is visible here — presumably ``parse`` or the
    save hook enforces it elsewhere; confirm before relying on it.
    """
    self.__config[str(name)] = parse(value)
    self.__save()
|
Set the value of a configuration item. If the item does not exist, create it.
User are not allowed to set private attributes.
|
__setitem__
|
python
|
SwanHubX/SwanLab
|
swanlab/data/run/config.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/data/run/config.py
|
Apache-2.0
|
def set(self, name: str, value: Any):
    """
    Explicitly assign ``value`` to the configuration item ``name`` and
    persist the configuration (same effect as ``self[name] = value``).
    """
    self.__config[str(name)] = parse(value)
    self.__save()
|
Explicitly set the value of a configuration item and save it.
Private attributes are not allowed to be set.
|
set
|
python
|
SwanHubX/SwanLab
|
swanlab/data/run/config.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/data/run/config.py
|
Apache-2.0
|
def pop(self, name: str):
    """
    Remove the configuration item ``name`` and persist the change.

    Returns:
        The removed value, or None when the item does not exist (nothing
        is saved in that case).
    """
    try:
        removed = self.__config.pop(name)
    except KeyError:
        return None
    self.__save()
    return removed
|
Delete a configuration item; if the item does not exist, skip.
|
pop
|
python
|
SwanHubX/SwanLab
|
swanlab/data/run/config.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/data/run/config.py
|
Apache-2.0
|
def update(self, __m: Union[MutableMapping, argparse.Namespace] = None, **kwargs):
    """
    Merge key/value pairs from ``__m`` and from ``kwargs`` into the
    configuration, overwriting existing keys, then persist the result.
    """
    if __m is not None:
        # ``parse`` normalizes the whole mapping (or Namespace) at once.
        for key, value in parse(__m).items():
            self.__config[key] = value
    # Keyword arguments are normalized one value at a time.
    for key, value in kwargs.items():
        self.__config[key] = parse(value)
    self.__save()
|
Update the configuration with the key/value pairs from __m, overwriting existing keys.
|
update
|
python
|
SwanHubX/SwanLab
|
swanlab/data/run/config.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/data/run/config.py
|
Apache-2.0
|
def clean(self):
    """
    Reset the configuration in place.

    Attention: this clears every stored item and detaches the setter hook;
    the emptied configuration is NOT saved automatically.
    """
    self.__on_setter = None
    self.__config.clear()
|
Clean the configuration.
Attention: This method will reset the instance and instance will not automatically save the configuration.
|
clean
|
python
|
SwanHubX/SwanLab
|
swanlab/data/run/config.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/data/run/config.py
|
Apache-2.0
|
def __flatten_dict(self, d: dict, parent_key='', sep='.') -> dict:
    """Flatten a nested dict into one level, joining keys with ``sep``.

    For example ``{"a": {"b": 1}}`` becomes ``{"a.b": 1}``.
    """
    flat = {}
    for key, value in d.items():
        compound = f"{parent_key}{sep}{key}" if parent_key else key
        if isinstance(value, dict):
            flat.update(self.__flatten_dict(value, compound, sep=sep))
        else:
            flat[compound] = value
    return flat
|
Helper method to flatten nested dictionaries with dot notation
|
__flatten_dict
|
python
|
SwanHubX/SwanLab
|
swanlab/data/run/main.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/data/run/main.py
|
Apache-2.0
|
def get_url() -> Optional["str"]:
    """
    Return the URL of the current experiment.

    NOTE: None is returned when no run has been initialized or the run is
    not in 'cloud' mode.
    """
    return None if run is None else run.public.cloud.experiment_url
|
Get the url of the current experiment.
NOTE: return None if the experiment has not been initialized or mode is not 'cloud'.
|
get_url
|
python
|
SwanHubX/SwanLab
|
swanlab/data/run/main.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/data/run/main.py
|
Apache-2.0
|
def get_project_url() -> Optional["str"]:
    """
    Return the URL of the current project.

    NOTE: None is returned when no run has been initialized or the run is
    not in 'cloud' mode.
    """
    return None if run is None else run.public.cloud.project_url
|
Get the url of the current project.
NOTE: return None if the experiment has not been initialized or mode is not 'cloud'.
|
get_project_url
|
python
|
SwanHubX/SwanLab
|
swanlab/data/run/main.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/data/run/main.py
|
Apache-2.0
|
def __get_property_from_http(self, name: str):
    """
    Fetch attribute ``name`` from the underlying http object, or None when
    the client is unavailable.

    NOTE(review): the original doc said the http object is initialized on
    demand — presumably inside the ``available`` property, which is not
    visible here; confirm.
    """
    if not self.available:
        return None
    return getattr(self.__http, name)
|
Get the property from the http object.
if the http object is None, it will be initialized.
if initialization fails, it will return None.
|
__get_property_from_http
|
python
|
SwanHubX/SwanLab
|
swanlab/data/run/public.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/data/run/public.py
|
Apache-2.0
|
def project_url(self):
    """
    The URL of the project page on SwanLab.

    Returns None when swanlab is not running in cloud mode.
    """
    return self.__get_property_from_http("web_proj_url") if self.available else None
|
The url of the project. It is the url of the project page on the SwanLab.
If swanlab is not running in cloud mode, it will return None.
|
project_url
|
python
|
SwanHubX/SwanLab
|
swanlab/data/run/public.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/data/run/public.py
|
Apache-2.0
|
def experiment_url(self):
    """
    The URL of the experiment page on SwanLab; None outside cloud mode.
    """
    return self.__get_property_from_http("web_exp_url") if self.available else None
|
The url of the experiment. It is the url of the experiment page on the SwanLab.
|
experiment_url
|
python
|
SwanHubX/SwanLab
|
swanlab/data/run/public.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/data/run/public.py
|
Apache-2.0
|
def json(self):
    """
    Serialize the public config to a plain dict (used for JSON output).
    """
    cloud_part = {
        "project_name": self.cloud.project_name,
        "project_url": self.cloud.project_url,
        "experiment_name": self.cloud.experiment_name,
        "experiment_url": self.cloud.experiment_url,
    }
    return {
        "project_name": self.project_name,
        "version": self.version,
        "run_id": self.run_id,
        "swanlog_dir": self.swanlog_dir,
        "run_dir": self.run_dir,
        "cloud": cloud_part,
    }
|
Return a dict of the public config.
This method is used to serialize the public config to json.
|
json
|
python
|
SwanHubX/SwanLab
|
swanlab/data/run/public.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/data/run/public.py
|
Apache-2.0
|
def parse_git_url(url):
    """Normalize a git remote URL to https form, stripping a trailing ``.git``.

    SSH-style ``git@host:path`` (and ``git@host:port/path``) remotes are
    rewritten to ``https://host/path``; other URLs are only stripped of the
    ``.git`` suffix.
    """
    if url.startswith("git@"):
        host_part, _, path = url[4:].partition("/")
        if ":" in host_part:
            host, _, tail = host_part.rpartition(":")
            # A numeric tail is a port; otherwise it is the first path segment.
            if tail.isdigit():
                url = f"https://{host}:{tail}/{path}"
            else:
                url = f"https://{host}/{tail}/{path}"
        else:
            url = f"https://{host_part}/{path}"
    if url.endswith(".git"):
        url = url[:-4]
    return url
|
Return the remote URL of a git repository.
|
parse_git_url
|
python
|
SwanHubX/SwanLab
|
swanlab/data/run/metadata/runtime.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/data/run/metadata/runtime.py
|
Apache-2.0
|
def replace_second_colon(input_string, replacement):
    """Replace the second ':' in ``input_string`` with ``replacement``.

    The string is returned unchanged when it contains fewer than two colons.
    """
    first = input_string.find(":")
    if first == -1:
        return input_string
    second = input_string.find(":", first + 1)
    if second == -1:
        return input_string
    return input_string[:second] + replacement + input_string[second + 1:]
|
Replace the second colon in a string.
|
replace_second_colon
|
python
|
SwanHubX/SwanLab
|
swanlab/data/run/metadata/runtime.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/data/run/metadata/runtime.py
|
Apache-2.0
|
def store_init_configuration(self, values: dict):
    """
    Record ``values`` as the run's initial hyperparameters; call this at
    the beginning of the experiment.

    Args:
        values: Mapping of hyperparameter names to values of type `bool`,
            `str`, `float`, `int`, or `None`.
    """
    import swanlab

    swanlab.config.update(values, allow_val_change=True)
    logger.debug("Stored initial configuration hyperparameters to SwanLab")
|
Logs `values` as hyperparameters for the run. Should be run at the beginning of your experiment.
Args:
values (Dictionary `str` to `bool`, `str`, `float` or `int`):
Values to be stored as initial hyperparameters as key-value pairs. The values need to have type `bool`,
`str`, `float`, `int`, or `None`.
|
store_init_configuration
|
python
|
SwanHubX/SwanLab
|
swanlab/integration/accelerate.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/integration/accelerate.py
|
Apache-2.0
|
def log(self, values: dict, step: Optional[int] = None, **kwargs):
    """
    Logs `values` to the current run.
    Args:
        values : Dict[str, DataType]
            Data must be a dict.
            The key must be a string with 0-9, a-z, A-Z, " ", "_", "-", "/".
            The value must be a `float`, `float convertible object`, `int` or `swanlab.data.BaseType`.
        step : int, optional
            The step number of the current data, if not provided, it will be automatically incremented.
            If step is duplicated, the data will be ignored.
        kwargs:
            Additional key word arguments passed along to the `swanlab.log` method. Likes:
                print_to_console : bool, optional
                    Whether to print the data to the console, the default is False.
    """
    self.run.log(values, step=step, **kwargs)
    logger.debug("Successfully logged to SwanLab")
|
Logs `values` to the current run.
Args:
data : Dict[str, DataType]
Data must be a dict.
The key must be a string with 0-9, a-z, A-Z, " ", "_", "-", "/".
The value must be a `float`, `float convertible object`, `int` or `swanlab.data.BaseType`.
step : int, optional
The step number of the current data, if not provided, it will be automatically incremented.
If step is duplicated, the data will be ignored.
kwargs:
Additional key word arguments passed along to the `swanlab.log` method. Likes:
print_to_console : bool, optional
Whether to print the data to the console, the default is False.
|
log
|
python
|
SwanHubX/SwanLab
|
swanlab/integration/accelerate.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/integration/accelerate.py
|
Apache-2.0
|
def log_images(self, values: dict, step: Optional[int] = None, **kwargs):
    """
    Log dictionaries of images to the current run.

    Args:
        values: Mapping from key to a list of images; each image is wrapped
            in `swanlab.Image` before logging (e.g. `np.ndarray` or
            `PIL.Image` inputs).
        step: Optional run step the log will be affiliated with.
        kwargs: Extra keyword arguments forwarded to the `log` method, e.g.
            `print_to_console` (bool, default False).
    """
    import swanlab

    for key, images in values.items():
        self.log({key: [swanlab.Image(img) for img in images]}, step=step, **kwargs)
    logger.debug("Successfully logged images to SwanLab")
|
Logs `images` to the current run.
Args:
values (Dictionary `str` to `List` of `np.ndarray` or `PIL.Image`):
Values to be logged as key-value pairs. The values need to have type `List` of `np.ndarray` or
step (`int`, *optional*):
The run step. If included, the log will be affiliated with this step.
kwargs:
Additional key word arguments passed along to the `swanlab.log` method. Likes:
print_to_console : bool, optional
Whether to print the data to the console, the default is False.
|
log_images
|
python
|
SwanHubX/SwanLab
|
swanlab/integration/accelerate.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/integration/accelerate.py
|
Apache-2.0
|
def gather_args(self):
"Gather config parameters accessible to the learner"
cb_args = {f"{cb}": getattr(cb, "__stored_args__", True) for cb in self.cbs if cb != self}
args = {"Learner": self.learn, **cb_args}
try:
n_inp = self.dls.train.n_inp
args["n_inp"] = n_inp
xb = self.dls.valid.one_batch()[:n_inp]
args.update(
{f"input {n+1} dim {i+1}": d for n in range(n_inp) for i, d in enumerate(list(detuplify(xb[n]).shape))}
)
except Exception:
swl.warning("Failed to gather input dimensions")
with ignore_exceptions():
args["batch_size"] = self.dls.bs
args["batch_per_epoch"] = len(self.dls.train)
args["model_parameters"] = total_params(self.model)[0]
args["device"] = self.dls.device.type
args["frozen"] = bool(self.opt.frozen_idx)
args["frozen_idx"] = self.opt.frozen_idx
args["dataset/tfms"] = f"{self.dls.dataset.tfms}"
args["dls/after_item"] = f"{self.dls.after_item}"
args["dls/before_batch"] = f"{self.dls.before_batch}"
args["dls/after_batch"] = f"{self.dls.after_batch}"
return args
|
Gather config parameters accessible to the learner
|
gather_args
|
python
|
SwanHubX/SwanLab
|
swanlab/integration/fastai.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/integration/fastai.py
|
Apache-2.0
|
def __init__(
    self,
    project: Optional[str] = None,
    workspace: Optional[str] = None,
    experiment_name: Optional[str] = None,
    description: Optional[str] = None,
    logdir: Optional[str] = None,
    mode: Optional[str] = None,
    **kwargs: Any,
):
    """
    Pass this `SwanLabCallback` via the `callback` parameter when
    initializing `transformers.Trainer` so the Trainer uses SwanLab's
    logging and monitoring during training.

    All parameters mirror `swanlab.init`; see
    [the docs](https://docs.swanlab.cn/api/py-init.html#swanlab-init).

    Parameters
    ----------
    project : str, optional
        Project name of the experiment; defaults to the name of the current
        working directory.
    workspace : str, optional
        Organization or user that owns the project (currently only
        yourself); defaults to the current user.
    experiment_name : str, optional
        Name of the experiment; SwanLab generates one when omitted.
    description : str, optional
        Free-form description/label for the experiment; can also be edited
        later in the web interface.
    logdir : str, optional
        Folder that stores all log information produced by SwanLab. When
        None, a "swanlog" folder is created next to the running code and
        can be visualized with `swanlab watch` run from that same path. A
        custom folder must exist, should preferably hold only SwanLab data,
        and is viewed with `swanlab watch -l ./your_specified_folder`.
    mode : str, optional
        One of 'cloud' (upload and keep local logs), 'cloud-only' (upload
        without local logs), 'local' (local logs only, no upload), or
        'disabled' (parse data without saving or uploading).
    """
    init_kwargs = {
        "project": project,
        "workspace": workspace,
        "experiment_name": experiment_name,
        "description": description,
        "logdir": logdir,
        "mode": mode,
    }
    super().__init__(**init_kwargs, **kwargs)
|
To use the `SwanLabCallback`, pass it into the `callback` parameter when initializing the `transformers.Trainer`.
This allows the Trainer to utilize SwanLab's logging and monitoring functionalities during the training process.
Parameters same with `swanlab.init`. Finds more informations
[here](https://docs.swanlab.cn/api/py-init.html#swanlab-init)
Parameters
----------
project : str, optional
The project name of the current experiment, the default is None,
which means the current project name is the same as the current working directory.
workspace : str, optional
Where the current project is located, it can be an organization or a user (currently only supports yourself).
The default is None, which means the current entity is the same as the current user.
experiment_name : str, optional
The experiment name you currently have open. If this parameter is not provided,
SwanLab will generate one for you by default.
description : str, optional
The experiment description you currently have open,
used for a more detailed introduction or labeling of the current experiment.
If you do not provide this parameter, you can modify it later in the web interface.
logdir : str, optional
The folder will store all the log information generated during the execution of SwanLab.
If the parameter is None,
SwanLab will generate a folder named "swanlog" in the same path as the code execution to store the data.
If you want to visualize the generated log files,
simply run the command `swanlab watch` in the same path where the code is executed
(without entering the "swanlog" folder).
You can also specify your own folder, but you must ensure that the folder exists and preferably does not contain
anything other than data generated by Swanlab.
In this case, if you want to view the logs,
you must use something like `swanlab watch -l ./your_specified_folder` to specify the folder path.
mode : str, optional
Allowed values are 'cloud', 'cloud-only', 'local', 'disabled'.
If the value is 'cloud', the data will be uploaded to the cloud and the local log will be saved.
If the value is 'cloud-only', the data will only be uploaded to the cloud and the local log will not be saved.
If the value is 'local', the data will only be saved locally and will not be uploaded to the cloud.
If the value is 'disabled', the data will not be saved or uploaded, just parsing the data.
|
__init__
|
python
|
SwanHubX/SwanLab
|
swanlab/integration/huggingface.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/integration/huggingface.py
|
Apache-2.0
|
def add_config(self, config: "Config", **kwargs) -> None:
    """Record the config to swanlab.

    Nested dictionaries are flattened into "a/b"-style keys. Lists and
    tuples are kept whole when none of their elements are dicts; otherwise
    each element is expanded under an indexed key like "key[0]".

    Fixes over the previous version:
      - non-dict items inside a mixed list no longer crash (repack_dict
        used to call ``.items()`` on them);
      - the accumulated prefix is no longer dropped for lists nested below
        the top level.

    Args:
        config (Config): The Config object; must provide ``to_dict()``.
    """

    def repack_dict(mapping, prefix=""):
        """Flatten a nested mapping into a single-level dict."""
        flattened = dict()
        for key, value in mapping.items():
            key = str(key)
            qualified = f"{prefix}/{key}" if prefix else key
            if isinstance(value, dict):
                flattened.update(repack_dict(value, qualified))
            elif isinstance(value, (list, tuple)):
                if all(not isinstance(element, dict) for element in value):
                    # Plain sequence: store it whole under its (prefixed) key.
                    flattened[qualified] = value
                else:
                    for i, item in enumerate(value):
                        indexed = f"{qualified}[{i}]"
                        if isinstance(item, dict):
                            flattened.update(repack_dict(item, indexed))
                        else:
                            flattened[indexed] = item
            else:
                flattened[qualified] = value
        return flattened

    self._swanlab.config.update(repack_dict(config.to_dict()))
|
Record the config to swanlab.
Args:
config (Config): The Config object
|
add_config
|
python
|
SwanHubX/SwanLab
|
swanlab/integration/mmengine.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/integration/mmengine.py
|
Apache-2.0
|
def add_image(self, name: str, image: np.ndarray, step: int = 0, **kwargs) -> None:
    """Record a single image to swanlab.

    Args:
        name (str): The image identifier.
        image (np.ndarray): Image data to save; wrapped in ``swanlab.Image``.
            The format should be RGB.
        step (int): Global step value to record. Defaults to 0.
    """
    self._swanlab.log({name: self._swanlab.Image(image)}, step=step)
|
Record the image to swanlab.
Args:
name (str): The image identifier.
image (np.ndarray): The image to be saved. The format
should be RGB. Defaults to None.
step (int): Global step value to record. Defaults to 0.
|
add_image
|
python
|
SwanHubX/SwanLab
|
swanlab/integration/mmengine.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/integration/mmengine.py
|
Apache-2.0
|
def add_scalars(
    self,
    scalar_dict: dict,
    step: int = 0,
    file_path: Optional[str] = None,
    **kwargs,
) -> None:
    """Record scalar metrics to swanlab.

    Args:
        scalar_dict (dict): Tag/value pairs to log.
        step (int): Global step value to record. Defaults to 0.
        file_path (str, optional): Accepted for API compatibility.
            NOTE(review): this backend ignores it — nothing is written to
            ``file_path`` here.
    """
    self._swanlab.log(scalar_dict, step=step)
|
Record the scalars' data.
Args:
scalar_dict (dict): Key-value pair storing the tag and
corresponding values.
step (int): Global step value to record. Defaults to 0.
file_path (str, optional): The scalar's data will be
saved to the `file_path` file at the same time
if the `file_path` parameter is specified.
Defaults to None.
|
add_scalars
|
python
|
SwanHubX/SwanLab
|
swanlab/integration/mmengine.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/integration/mmengine.py
|
Apache-2.0
|
def __init__(
    self,
    project: Optional[str] = None,
    workspace: Optional[str] = None,
    experiment_name: Optional[str] = None,
    description: Optional[str] = None,
    logdir: Optional[str] = None,
    mode: Optional[str] = None,
    tags: Optional[List[str]] = None,
    **kwargs: Any,
):
    """Callback that reports ``paddlenlp.Trainer`` runs to SwanLab.

    Pass an instance through the Trainer's ``callback`` parameter so the
    Trainer can use SwanLab's logging and monitoring during training.
    All parameters mirror ``swanlab.init``; see
    https://docs.swanlab.cn/api/py-init.html#swanlab-init for details.

    Parameters
    ----------
    project : str, optional
        Project name; defaults to the current working directory name.
    workspace : str, optional
        Organization or user owning the project; defaults to the current user.
    experiment_name : str, optional
        Experiment name; generated automatically when omitted.
    description : str, optional
        Free-form experiment description; editable later in the web UI.
    logdir : str, optional
        Folder for SwanLab log files; defaults to a "swanlog" folder next to
        the running code. Custom folders must exist and should hold only
        SwanLab data; view them with ``swanlab watch -l <folder>``.
    mode : str, optional
        One of 'cloud', 'cloud-only', 'local', 'disabled' — controls whether
        data is uploaded, stored locally, both, or merely parsed.
    tags : list of str, optional
        Experiment tags; "paddlenlp" is always appended when absent.
    **kwargs
        Extra arguments forwarded verbatim to ``swanlab.init``.
    """
    self._swanlab = swanlab
    self._initialized = False
    # SWANLAB_LOG_MODEL is read but model saving is not supported yet.
    self._log_model = os.getenv("SWANLAB_LOG_MODEL", None)
    tags = tags or []
    if "paddlenlp" not in tags:
        tags.append("paddlenlp")
    # Arguments held until swanlab.init is called from the setup hook.
    self._swanlab_init: Dict[str, Any] = {
        "project": project,
        "workspace": workspace,
        "experiment_name": experiment_name,
        "description": description,
        "logdir": logdir,
        "mode": mode,
        "tags": tags,
    }
    self._swanlab_init.update(**kwargs)
|
To use the `SwanLabCallback`, pass it into the `callback` parameter when initializing the `paddlenlp.Trainer`.
This allows the Trainer to utilize SwanLab's logging and monitoring functionalities during the training process.
Parameters same with `swanlab.init`. Finds more informations
[here](https://docs.swanlab.cn/api/py-init.html#swanlab-init)
Parameters
----------
project : str, optional
The project name of the current experiment, the default is None,
which means the current project name is the same as the current working directory.
workspace : str, optional
Where the current project is located, it can be an organization or a user (currently only supports yourself).
The default is None, which means the current entity is the same as the current user.
experiment_name : str, optional
The experiment name you currently have open. If this parameter is not provided,
SwanLab will generate one for you by default.
description : str, optional
The experiment description you currently have open,
used for a more detailed introduction or labeling of the current experiment.
If you do not provide this parameter, you can modify it later in the web interface.
logdir : str, optional
The folder will store all the log information generated during the execution of SwanLab.
If the parameter is None,
SwanLab will generate a folder named "swanlog" in the same path as the code execution to store the data.
If you want to visualize the generated log files,
simply run the command `swanlab watch` in the same path where the code is executed
(without entering the "swanlog" folder).
You can also specify your own folder, but you must ensure that the folder exists and preferably does not contain
anything other than data generated by Swanlab.
In this case, if you want to view the logs,
you must use something like `swanlab watch -l ./your_specified_folder` to specify the folder path.
mode : str, optional
Allowed values are 'cloud', 'cloud-only', 'local', 'disabled'.
If the value is 'cloud', the data will be uploaded to the cloud and the local log will be saved.
If the value is 'cloud-only', the data will only be uploaded to the cloud and the local log will not be saved.
If the value is 'local', the data will only be saved locally and will not be uploaded to the cloud.
If the value is 'disabled', the data will not be saved or uploaded, just parsing the data.
|
__init__
|
python
|
SwanHubX/SwanLab
|
swanlab/integration/paddlenlp.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/integration/paddlenlp.py
|
Apache-2.0
|
def setup(self, args, state, model, **kwargs):
    """
    Setup the optional SwanLab (*swanlab*) integration.
    Collects the training arguments and model configuration, then starts a
    SwanLab run (on the world-zero process only) and records the combined
    configuration plus model parameter counts.
    You can also override the following environment variables. Find more information about environment
    variables [here](https://docs.swanlab.cn/en/api/environment-variable.html#environment-variables)
    Environment:
    - **SWANLAB_API_KEY** (`str`, *optional*, defaults to `None`):
    Cloud API Key. During login, this environment variable is checked first. If it doesn't exist, the system
    checks if the user is already logged in. If not, the login process is initiated.
    - If a string is passed to the login interface, this environment variable is ignored.
    - If the user is already logged in, this environment variable takes precedence over locally stored
    login information.
    - **SWANLAB_PROJECT** (`str`, *optional*, defaults to `None`):
    Set this to a custom string to store results in a different project. If not specified, the name of the current
    running directory is used.
    - **SWANLAB_LOG_DIR** (`str`, *optional*, defaults to `swanlog`):
    This environment variable specifies the storage path for log files when running in local mode.
    By default, logs are saved in a folder named swanlog under the working directory.
    - **SWANLAB_MODE** (`Literal["local", "cloud", "disabled"]`, *optional*, defaults to `cloud`):
    SwanLab's parsing mode, which involves callbacks registered by the operator. Currently, there are three modes:
    local, cloud, and disabled. Note: Case-sensitive. Find more information
    [here](https://docs.swanlab.cn/en/api/py-init.html#swanlab-init)
    - **SWANLAB_LOG_MODEL** (`str`, *optional*, defaults to `None`):
    SwanLab does not currently support the save mode functionality. This feature will be available in a future
    release.
    - **SWANLAB_WEB_HOST** (`str`, *optional*, defaults to `None`):
    Web address for the SwanLab cloud environment for private version (its free)
    - **SWANLAB_API_HOST** (`str`, *optional*, defaults to `None`):
    API address for the SwanLab cloud environment for private version (its free)
    """
    self._initialized = True
    # Only the main (rank-zero) process talks to SwanLab.
    if state.is_world_process_zero:
        logging.info('Automatic SwanLab logging enabled, to disable set os.environ["SWANLAB_MODE"] = "disabled"')
        combined_dict = {**args.to_dict()}
        # Model config values are merged in, with training args taking precedence.
        if hasattr(model, "config") and model.config is not None:
            model_config = model.config if isinstance(model.config, dict) else model.config.to_dict()
            combined_dict = {**model_config, **combined_dict}
        if hasattr(model, "peft_config") and model.peft_config is not None:
            peft_config = model.peft_config
            combined_dict = {**{"peft_config": peft_config}, **combined_dict}
        trial_name = state.trial_name
        init_args = {}
        # Hyperparameter-search trials get the trial name appended to run_name.
        if trial_name is not None:
            init_args["experiment_name"] = f"{args.run_name}-{trial_name}"
        elif args.run_name is not None:
            init_args["experiment_name"] = args.run_name
        init_args["project"] = os.getenv("SWANLAB_PROJECT", None)
        # Only start a new run if the user did not already call swanlab.init.
        if self._swanlab.get_run() is None:
            init_args.update(self._swanlab_init)
            self._swanlab.init(
                **init_args,
            )
        # show paddlenlp logo!
        self._swanlab.config["FRAMEWORK"] = "paddlenlp"
        # add config parameters (run may have been created manually)
        self._swanlab.config.update(combined_dict)
        # add number of model parameters to swanlab config
        try:
            self._swanlab.config.update({"model_num_parameters": model.num_parameters()})
            # get peft model parameters
            if type(model).__name__ == "PeftModel" or type(model).__name__ == "PeftMixedModel":
                trainable_params, all_param = model.get_nb_trainable_parameters()
                self._swanlab.config.update({"peft_model_trainable_params": trainable_params})
                self._swanlab.config.update({"peft_model_all_param": all_param})
        except AttributeError:
            logging.info("Could not log the number of model parameters in SwanLab due to an AttributeError.")
        # log the initial model architecture to an artifact
        if self._log_model is not None:
            logging.warning(
                "SwanLab does not currently support the save mode functionality. "
                "This feature will be available in a future release."
            )
|
Setup the optional SwanLab (*swanlab*) integration.
You can also override the following environment variables. Find more information about environment
variables [here](https://docs.swanlab.cn/en/api/environment-variable.html#environment-variables)
Environment:
- **SWANLAB_API_KEY** (`str`, *optional*, defaults to `None`):
Cloud API Key. During login, this environment variable is checked first. If it doesn't exist, the system
checks if the user is already logged in. If not, the login process is initiated.
- If a string is passed to the login interface, this environment variable is ignored.
- If the user is already logged in, this environment variable takes precedence over locally stored
login information.
- **SWANLAB_PROJECT** (`str`, *optional*, defaults to `None`):
Set this to a custom string to store results in a different project. If not specified, the name of the current
running directory is used.
- **SWANLAB_LOG_DIR** (`str`, *optional*, defaults to `swanlog`):
This environment variable specifies the storage path for log files when running in local mode.
By default, logs are saved in a folder named swanlog under the working directory.
- **SWANLAB_MODE** (`Literal["local", "cloud", "disabled"]`, *optional*, defaults to `cloud`):
SwanLab's parsing mode, which involves callbacks registered by the operator. Currently, there are three modes:
local, cloud, and disabled. Note: Case-sensitive. Find more information
[here](https://docs.swanlab.cn/en/api/py-init.html#swanlab-init)
- **SWANLAB_LOG_MODEL** (`str`, *optional*, defaults to `None`):
SwanLab does not currently support the save mode functionality.This feature will be available in a future
release
- **SWANLAB_WEB_HOST** (`str`, *optional*, defaults to `None`):
Web address for the SwanLab cloud environment for private version (its free)
- **SWANLAB_API_HOST** (`str`, *optional*, defaults to `None`):
API address for the SwanLab cloud environment for private version (its free)
|
setup
|
python
|
SwanHubX/SwanLab
|
swanlab/integration/paddlenlp.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/integration/paddlenlp.py
|
Apache-2.0
|
def log_image(self, key: str, images: List[Any], step: Optional[int] = None, **kwargs: Any) -> None:
    """Log images (tensors, numpy arrays, PIL Images or file paths).

    Optional keyword arguments must be lists of per-image values
    (e.g. ``caption=[...]``) with the same length as ``images``.
    """
    if not isinstance(images, list):
        raise TypeError(f'Expected a list as "images", found {type(images)}')
    total = len(images)
    for arg_name, values in kwargs.items():
        if len(values) != total:
            raise ValueError(f"Expected {total} items but only found {len(values)} for {arg_name}")
    # Split the column-wise kwargs into one kwargs dict per image.
    per_image_kwargs = [{name: kwargs[name][idx] for name in kwargs} for idx in range(total)]
    import swanlab
    media = {key: [swanlab.Image(img, **extra) for img, extra in zip(images, per_image_kwargs)]}
    self.log_metrics(media, step)  # type: ignore[arg-type]
|
Log images (tensors, numpy arrays, PIL Images or file paths).
Optional kwargs are lists passed to each image (ex: caption).
|
log_image
|
python
|
SwanHubX/SwanLab
|
swanlab/integration/pytorch_lightning.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/integration/pytorch_lightning.py
|
Apache-2.0
|
def log_audio(self, key: str, audios: List[Any], step: Optional[int] = None, **kwargs: Any) -> None:
    r"""Log audios (numpy arrays, or file paths).

    Args:
        key: The key to be used for logging the audio files
        audios: The list of audio file paths, or numpy arrays to be logged
        step: The step number to be used for logging the audio files
        \**kwargs: Optional kwargs are lists passed to each ``swanlab.Audio``
            instance (ex: caption, sample_rate); each list must match
            ``audios`` in length.
    """
    if not isinstance(audios, list):
        raise TypeError(f'Expected a list as "audios", found {type(audios)}')
    total = len(audios)
    for arg_name, values in kwargs.items():
        if len(values) != total:
            raise ValueError(f"Expected {total} items but only found {len(values)} for {arg_name}")
    # One kwargs dict per clip, sliced from the column-wise lists.
    per_clip_kwargs = [{name: kwargs[name][idx] for name in kwargs} for idx in range(total)]
    import swanlab
    media = {key: [swanlab.Audio(clip, **extra) for clip, extra in zip(audios, per_clip_kwargs)]}
    self.log_metrics(media, step)  # type: ignore[arg-type]
|
Log audios (numpy arrays, or file paths).
Args:
key: The key to be used for logging the audio files
audios: The list of audio file paths, or numpy arrays to be logged
step: The step number to be used for logging the audio files
\**kwargs: Optional kwargs are lists passed to each ``swanlab.Audio`` instance (ex: caption, sample_rate).
Optional kwargs are lists passed to each audio (ex: caption, sample_rate).
|
log_audio
|
python
|
SwanHubX/SwanLab
|
swanlab/integration/pytorch_lightning.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/integration/pytorch_lightning.py
|
Apache-2.0
|
def log_text(self, key: str, texts: List[Any], step: Optional[int] = None, **kwargs: Any) -> None:
    r"""Log texts (a list of strings).

    Fixed docstring: the parameter is ``texts`` (not ``audios``) and values
    are wrapped in ``swanlab.Text`` — the previous text was copied from
    ``log_audio``.

    Args:
        key: The key to be used for logging the strings
        texts: The list of strings to be logged
        step: The step number to be used for logging the strings
        \**kwargs: Optional kwargs are lists passed to each ``swanlab.Text``
            instance (ex: caption); each list must match ``texts`` in length.
    """
    if not isinstance(texts, list):
        raise TypeError(f'Expected a list as "texts", found {type(texts)}')
    n = len(texts)
    for k, v in kwargs.items():
        if len(v) != n:
            raise ValueError(f"Expected {n} items but only found {len(v)} for {k}")
    # Slice the column-wise kwargs into one kwargs dict per text entry.
    kwarg_list = [{k: kwargs[k][i] for k in kwargs} for i in range(n)]
    import swanlab
    metrics = {key: [swanlab.Text(text, **kwarg) for text, kwarg in zip(texts, kwarg_list)]}
    self.log_metrics(metrics, step)  # type: ignore[arg-type]
|
Log texts (a list of strings).
Args:
key: The key to be used for logging the strings
texts: The list of strings to be logged
step: The step number to be used for logging the strings
\**kwargs: Optional kwargs are lists passed to each ``swanlab.Text`` instance (ex: caption).
Optional kwargs are lists passed to each text (ex: caption).
|
log_text
|
python
|
SwanHubX/SwanLab
|
swanlab/integration/pytorch_lightning.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/integration/pytorch_lightning.py
|
Apache-2.0
|
def after_iteration(self, model: Booster, epoch: int, evals_log: dict) -> bool:
    """Log evaluation metrics after each boosting iteration.

    Returns False so training is never stopped early by this callback.
    """
    for dataset_name, metric_map in evals_log.items():
        for metric_name, history in metric_map.items():
            # Only the most recent value of each metric is logged.
            swanlab.log({f"{dataset_name}-{metric_name}": history[-1]})
    swanlab.log({"epoch": epoch})
    return False
|
Run after each iteration. Return True when training should stop.
|
after_iteration
|
python
|
SwanHubX/SwanLab
|
swanlab/integration/xgboost.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/integration/xgboost.py
|
Apache-2.0
|
def __init__(
    self, name: str, symbols: Sequence[str], resolver: ArgumentResponseResolver, client, lib_version
) -> None:
    """Prepare a patcher that logs SwanLab media or metrics for API calls.

    Args:
        name: LLM provider or package name, e.g. "Cohere", "OpenAI" or "Transformers".
        symbols: Dotted attribute paths to patch, e.g. ["Client.generate", "Pipeline.__call__"].
        resolver: Callable converting args/response into SwanLab media objects or metrics.
        client: API library or client object, e.g. "openai" or "openai.OpenAI()".
        lib_version: Version string of the patched library.
    """
    self.name = name
    self._api = client
    self.lib_version = lib_version
    # Maps each patched symbol back to its original callable.
    self.original_methods: Dict[str, Any] = {}
    self.symbols = symbols
    self.resolver = resolver
|
Patches the API to log SwanLab Media or metrics.
|
__init__
|
python
|
SwanHubX/SwanLab
|
swanlab/integration/integration_utils/autologging.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/integration/integration_utils/autologging.py
|
Apache-2.0
|
def patch(self, run: "SwanLabRun") -> None:
    """Patch each target symbol so calls are timed and logged to SwanLab.

    For every dotted path in ``self.symbols`` the original callable is saved
    in ``self.original_methods`` and replaced by a wrapper (async or sync,
    matching the original) that forwards the call, resolves the
    args/response pair via ``self.resolver`` and logs the result with
    ``swanlab.log``. Resolver errors are printed, never propagated.

    Args:
        run: The active SwanLabRun; not used directly by the wrapper bodies.
    """
    for symbol in self.symbols:
        # split on dots, e.g. "Client.generate" -> ["Client", "generate"]
        symbol_parts = symbol.split(".")
        # and get the attribute from the module
        # NOTE(review): ``self.set_api`` is not assigned in the visible
        # __init__ (which stores the client as ``self._api``) — presumably a
        # property defined elsewhere; confirm it yields the API object.
        original = functools.reduce(getattr, symbol_parts, self.set_api)
        def method_factory(original_method: Any):
            async def async_method(*args, **kwargs):
                future = asyncio.Future()
                async def callback(coro):
                    try:
                        result = await coro
                        loggable_dict = self.resolver(
                            args, kwargs, result, self.lib_version, timer.start_time, time_elapsed
                        )
                        if loggable_dict is not None:
                            swanlab.log(loggable_dict)
                        future.set_result(result)
                    except Exception as e:
                        # Best effort: never let logging break the API call.
                        print(e)
                with Timer() as timer:
                    start_time = time.perf_counter()
                    coro = original_method(*args, **kwargs)
                    end_time = time.perf_counter()
                    # NOTE(review): this measures coroutine creation, not the
                    # await, and rounds to whole seconds — confirm intended.
                    time_elapsed = int(end_time - start_time + 0.5)
                    asyncio.ensure_future(callback(coro))
                return await future
            def sync_method(*args, **kwargs):
                with Timer() as timer:
                    start_time = time.perf_counter()
                    result = original_method(*args, **kwargs)
                    end_time = time.perf_counter()
                    time_elapsed = int(end_time - start_time + 0.5)
                try:
                    loggable_dict = self.resolver(
                        args,
                        kwargs,
                        result,
                        self.lib_version,
                        timer.start_time,
                        time_elapsed,
                    )
                    if loggable_dict is not None:
                        swanlab.log(loggable_dict)
                except Exception as e:
                    print(e)
                return result
            # Preserve the wrapped callable's metadata on either wrapper.
            if inspect.iscoroutinefunction(original_method):
                return functools.wraps(original_method)(async_method)
            else:
                return functools.wraps(original_method)(sync_method)
        # save original method
        self.original_methods[symbol] = original
        # monkey patch the method
        if len(symbol_parts) == 1:
            setattr(self.set_api, symbol_parts[0], method_factory(original))
        else:
            setattr(
                functools.reduce(getattr, symbol_parts[:-1], self.set_api),
                symbol_parts[-1],
                method_factory(original),
            )
|
Patches the API to log media or metrics to SwanLab.
|
patch
|
python
|
SwanHubX/SwanLab
|
swanlab/integration/integration_utils/autologging.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/integration/integration_utils/autologging.py
|
Apache-2.0
|
def enable(self, init: AutologInitArgs = None) -> None:
    """Turn autologging on, restarting it if it was already active.

    Args:
        init: Optional dictionary of arguments to pass to SwanLab.init().
    """
    if self._is_enabled:
        # Re-enabling resets state so the new `init` arguments take effect.
        print(f"{self._name} autologging is already enabled, disabling and re-enabling.")
        self.disable()
    print(f"Enabling {self._name} autologging.")
    self._run_init(init=init)
    self._patch_api.patch(self._run)
|
Enable autologging.
Args:
init: Optional dictionary of arguments to pass to SwanLab.init().
|
enable
|
python
|
SwanHubX/SwanLab
|
swanlab/integration/integration_utils/autologging.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/integration/integration_utils/autologging.py
|
Apache-2.0
|
def import_module_lazy(name: str) -> types.ModuleType:
    """Import a module lazily, only when it is used.

    Inspired by importlib.util.LazyLoader, but improved so that the module
    loading is thread-safe. Circular dependency between modules can lead to a
    deadlock if the two modules are loaded from different threads.

    Args:
        name: Absolute (dotted) module name, e.g. "scipy.stats".

    Returns:
        The already-imported module from ``sys.modules`` when present,
        otherwise a lazy module that loads on first attribute access.

    Raises:
        ModuleNotFoundError: If no spec can be found for ``name``.
    """
    try:
        return sys.modules[name]
    except KeyError:
        spec = importlib.util.find_spec(name)
        if spec is None:
            # Fix: include the module name so the error is actionable
            # (the original raised a bare, message-less ModuleNotFoundError).
            raise ModuleNotFoundError(f"No module named {name!r}")
        module = importlib.util.module_from_spec(spec)
        module.__lazy_module_state__ = LazyModuleState(module)  # type: ignore
        module.__class__ = LazyModule
        # Register before loading so concurrent importers see one instance.
        sys.modules[name] = module
        return module
|
Import a module lazily, only when it is used.
Inspired by importlib.util.LazyLoader, but improved so that the module loading is
thread-safe. Circular dependency between modules can lead to a deadlock if the two
modules are loaded from different threads.
|
import_module_lazy
|
python
|
SwanHubX/SwanLab
|
swanlab/integration/integration_utils/get_modules.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/integration/integration_utils/get_modules.py
|
Apache-2.0
|
def get_module(
    name: str,
    required: Optional[Union[str, bool]] = None,
    lazy: bool = True,
) -> Any:
    """Return module or None. Absolute import is required.
    :param (str) name: Dot-separated module path. E.g., 'scipy.stats'.
    :param (str) required: If truthy, a message is printed when the import
        fails. NOTE(review): the original docstring promised a ``ValueError``,
        but the implementation only prints — confirm the intended contract
        before relying on an exception here.
    :param (bool) lazy: If True, return a lazy loader for the module.
    :return: (module|None) If import succeeds, the module will be returned.
        Returns None (implicitly) when the import fails or the module was
        previously marked unimportable.
    """
    # Modules that failed to import once are never retried in this process.
    if name not in _not_importable:
        try:
            if not lazy:
                return import_module(name)
            else:
                return import_module_lazy(name)
        except Exception:
            # Remember the failure so subsequent calls return None quickly.
            _not_importable.add(name)
            msg = f"Error importing optional module {name}"
            if required:
                print(msg)
|
Return module or None. Absolute import is required.
:param (str) name: Dot-separated module path. E.g., 'scipy.stats'.
:param (str) required: A string to raise a ValueError if missing
:param (bool) lazy: If True, return a lazy loader for the module.
:return: (module|None) If import succeeds, the module will be returned.
|
get_module
|
python
|
SwanHubX/SwanLab
|
swanlab/integration/integration_utils/get_modules.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/integration/integration_utils/get_modules.py
|
Apache-2.0
|
def _resolve_edit(
    self,
    request: Dict[str, Any],
    response: Response,
    lib_version: str,
    time_elapsed: float,
) -> Dict[str, Any]:
    """Resolve an `openai.Edit` request/response pair into loggable metrics."""
    # Render both sides of the exchange as markdown fragments.
    rendered_request = f"\n\n**Instruction**: {request['instruction']}\n\n" f"**Input**: {request['input']}\n"
    rendered_choices = [f"\n\n**Edited**: {item['text']}\n" for item in response["choices"]]
    return self._resolve_metrics(
        request=request,
        response=response,
        lib_version=lib_version,
        request_str=rendered_request,
        choices=rendered_choices,
        time_elapsed=time_elapsed,
    )
|
Resolves the request and response objects for `openai.Edit`.
|
_resolve_edit
|
python
|
SwanHubX/SwanLab
|
swanlab/integration/openai/resolver.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/integration/openai/resolver.py
|
Apache-2.0
|
def _resolve_completion(
    self,
    request: Dict[str, Any],
    response: Response,
    lib_version: str,
    time_elapsed: float,
) -> Dict[str, Any]:
    """Resolve an `openai.Completion` request/response pair into metrics."""
    rendered_prompt = f"\n\n**Prompt**: {request['prompt']}\n"
    rendered_choices = [f"\n\n**Completion**: {item['text']}\n" for item in response["choices"]]
    return self._resolve_metrics(
        request=request,
        response=response,
        lib_version=lib_version,
        request_str=rendered_prompt,
        choices=rendered_choices,
        time_elapsed=time_elapsed,
    )
|
Resolves the request and response objects for `openai.Completion`.
|
_resolve_completion
|
python
|
SwanHubX/SwanLab
|
swanlab/integration/openai/resolver.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/integration/openai/resolver.py
|
Apache-2.0
|
def _resolve_chat_completion(
    self,
    request: Dict[str, Any],
    response: Response,
    lib_version: str,
    time_elapsed: float,
) -> Dict[str, Any]:
    """Resolves the request and response objects for `openai.ChatCompletion`.

    (Docstring fixed: this handles the chat endpoint — it iterates
    request["messages"] and choice["message"] — not `openai.Completion`.)
    """
    # Render the full chat history as one markdown string.
    prompt = io.StringIO()
    for message in request["messages"]:
        prompt.write(f"\n\n**{message['role']}**: {message['content']}\n")
    request_str = prompt.getvalue()
    choices = [
        f"\n\n**{choice['message']['role']}**: {choice['message']['content']}\n" for choice in response["choices"]
    ]
    return self._resolve_metrics(
        request=request,
        response=response,
        lib_version=lib_version,
        request_str=request_str,
        choices=choices,
        time_elapsed=time_elapsed,
    )
|
Resolves the request and response objects for `openai.ChatCompletion`.
|
_resolve_chat_completion
|
python
|
SwanHubX/SwanLab
|
swanlab/integration/openai/resolver.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/integration/openai/resolver.py
|
Apache-2.0
|
def _resolve_metrics(
    self,
    request: Dict[str, Any],
    response: Response,
    lib_version: str,
    request_str: str,
    choices: List[str],
    time_elapsed: float,
) -> Dict[str, Any]:
    """Build the loggable metrics dict shared by all request resolvers.

    Pairs the rendered request with each rendered choice, then converts
    the aggregated metrics into a plain dict suitable for ``swanlab.log``.
    """
    results = [{"inputs": {"request": request_str}, "outputs": {"response": choice}} for choice in choices]
    metrics = self._get_metrics_to_log(request, response, lib_version, results, time_elapsed)
    return self._convert_metrics_to_dict(metrics)
|
Resolves the request and response objects for `openai.Completion`.
|
_resolve_metrics
|
python
|
SwanHubX/SwanLab
|
swanlab/integration/openai/resolver.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/integration/openai/resolver.py
|
Apache-2.0
|
def _get_usage_metrics(response: Response, time_elapsed: float) -> UsageMetrics:
    """Extract token-usage stats from the response, stamping elapsed time."""
    usage = response.get("usage")
    # Fall back to an empty metrics object when the API omitted usage info.
    usage_stats = UsageMetrics(**usage) if usage else UsageMetrics()
    usage_stats.elapsed_time = time_elapsed
    return usage_stats
|
Gets the usage stats from the response object.
|
_get_usage_metrics
|
python
|
SwanHubX/SwanLab
|
swanlab/integration/openai/resolver.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/integration/openai/resolver.py
|
Apache-2.0
|
def _resolve_edit(
    self,
    request: Dict[str, Any],
    response: Response,
    lib_version: str,
    time_elapsed: float,
) -> Dict[str, Any]:
    """Resolves the request and response objects for `openai.Edit`."""
    instruction = request['instruction']
    edit_input = request['input']
    request_str = f"\n\n**Instruction**: {instruction}\n\n" f"**Input**: {edit_input}\n"
    choices = []
    for choice in response["choices"]:
        choices.append(f"\n\n**Edited**: {choice['text']}\n")
    return self._resolve_metrics(
        request=request,
        response=response,
        lib_version=lib_version,
        request_str=request_str,
        choices=choices,
        time_elapsed=time_elapsed,
    )
|
Resolves the request and response objects for `openai.Edit`.
|
_resolve_edit
|
python
|
SwanHubX/SwanLab
|
swanlab/integration/openai/resolver.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/integration/openai/resolver.py
|
Apache-2.0
|
def _resolve_completion(
    self,
    request: Dict[str, Any],
    response: Response,
    lib_version: str,
    time_elapsed: float,
) -> Dict[str, Any]:
    """Resolve an `openai.OpenAI()` text-completion exchange into metrics."""
    prompt_text = f"\n\n**Prompt**: {request['prompt']}\n"
    completions = [f"\n\n**Completion**: {entry['text']}\n" for entry in response.get("choices")]
    return self._resolve_metrics(
        request=request,
        response=response,
        lib_version=lib_version,
        request_str=prompt_text,
        choices=completions,
        time_elapsed=time_elapsed,
    )
|
Resolves the request and response objects for `openai.OpenAI().Completion`.
|
_resolve_completion
|
python
|
SwanHubX/SwanLab
|
swanlab/integration/openai/resolver.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/integration/openai/resolver.py
|
Apache-2.0
|
def _resolve_chat_completion(
    self,
    request: Dict[str, Any],
    response: Response,
    lib_version: str,
    time_elapsed: float,
) -> Dict[str, Any]:
    """Resolves the request and response objects for the `openai.OpenAI()`
    chat-completions endpoint.

    (Docstring fixed: the code reads request["messages"] and
    choice["message"], i.e. chat completions, not text completions.)
    """
    # Render the full chat history as one markdown string.
    prompt = io.StringIO()
    for message in request["messages"]:
        prompt.write(f"\n\n**{message['role']}**: {message['content']}\n")
    request_str = prompt.getvalue()
    response_choices = response.get("choices")
    choices = [
        f"\n\n**{choice['message']['role']}**: {choice['message']['content']}\n" for choice in response_choices
    ]
    return self._resolve_metrics(
        request=request,
        response=response,
        lib_version=lib_version,
        request_str=request_str,
        choices=choices,
        time_elapsed=time_elapsed,
    )
|
Resolves the request and response objects for the `openai.OpenAI()` chat completions endpoint.
|
_resolve_chat_completion
|
python
|
SwanHubX/SwanLab
|
swanlab/integration/openai/resolver.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/integration/openai/resolver.py
|
Apache-2.0
|
def _resolve_metrics(
    self,
    request: Dict[str, Any],
    response: Response,
    lib_version: str,
    request_str: str,
    choices: List[str],
    time_elapsed: float,
) -> Dict[str, Any]:
    """Build the loggable metrics dict shared by all request resolvers.

    Pairs the rendered request with each rendered choice, then converts
    the aggregated metrics into a plain dict suitable for ``swanlab.log``.
    """
    results = [{"inputs": {"request": request_str}, "outputs": {"response": choice}} for choice in choices]
    metrics = self._get_metrics_to_log(request, response, lib_version, results, time_elapsed)
    return self._convert_metrics_to_dict(metrics)
|
Resolves the request and response objects for `openai.Completion`.
|
_resolve_metrics
|
python
|
SwanHubX/SwanLab
|
swanlab/integration/openai/resolver.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/integration/openai/resolver.py
|
Apache-2.0
|
def _get_usage_metrics(response: Response, time_elapsed: float) -> UsageMetrics:
    """Extract usage statistics from the response and record elapsed time."""
    if not response.get("usage"):
        # The API omitted usage info; start from an empty metrics object.
        stats = UsageMetrics()
    else:
        stats = UsageMetrics(**response["usage"])
    stats.elapsed_time = time_elapsed
    return stats
|
Gets the usage stats from the response object.
|
_get_usage_metrics
|
python
|
SwanHubX/SwanLab
|
swanlab/integration/openai/resolver.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/integration/openai/resolver.py
|
Apache-2.0
|
def _resolve_chat_completion(
    self,
    request: Dict[str, Any],
    response: Response,
    lib_version: str,
    time_elapsed: float,
) -> Dict[str, Any]:
    """Resolves the request and response objects for the `zhipuai.ZhipuAI()`
    chat-completions endpoint.

    Renders the chat history (request["messages"]) and each returned
    choice's message as markdown before delegating to ``_resolve_metrics``.
    """
    prompt = io.StringIO()
    for message in request["messages"]:
        prompt.write(f"\n\n**{message['role']}**: {message['content']}\n")
    request_str = prompt.getvalue()
    response_choices = response.get("choices")
    choices = [
        f"\n\n**{choice['message']['role']}**: {choice['message']['content']}\n" for choice in response_choices
    ]
    return self._resolve_metrics(
        request=request,
        response=response,
        lib_version=lib_version,
        request_str=request_str,
        choices=choices,
        time_elapsed=time_elapsed,
    )
|
Resolves the request and response objects for `zhipuai.ZhipuAI().Completion`.
|
_resolve_chat_completion
|
python
|
SwanHubX/SwanLab
|
swanlab/integration/zhipuai/resolver.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/integration/zhipuai/resolver.py
|
Apache-2.0
|
def _resolve_metrics(
    self,
    request: Dict[str, Any],
    response: Response,
    lib_version: str,
    request_str: str,
    choices: List[str],
    time_elapsed: float,
) -> Dict[str, Any]:
    """Pair the rendered request with each choice and emit a loggable dict."""
    results = []
    for rendered_choice in choices:
        results.append({"inputs": {"request": request_str}, "outputs": {"response": rendered_choice}})
    metrics = self._get_metrics_to_log(request, response, lib_version, results, time_elapsed)
    return self._convert_metrics_to_dict(metrics)
|
Resolves the request and response objects for `zhipuai.ZhipuAI().Completion`.
|
_resolve_metrics
|
python
|
SwanHubX/SwanLab
|
swanlab/integration/zhipuai/resolver.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/integration/zhipuai/resolver.py
|
Apache-2.0
|
def _get_usage_metrics(response: Response, time_elapsed: float) -> UsageMetrics:
    """Gets the usage stats from the response object."""
    usage = response.get("usage")
    # Fall back to an empty metrics object when the API reported no usage.
    usage_stats = UsageMetrics(**usage) if usage else UsageMetrics()
    usage_stats.elapsed_time = time_elapsed
    return usage_stats
|
Gets the usage stats from the response object.
|
_get_usage_metrics
|
python
|
SwanHubX/SwanLab
|
swanlab/integration/zhipuai/resolver.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/integration/zhipuai/resolver.py
|
Apache-2.0
|
def __init__(
    self,
    sender_email: str,
    receiver_email: str,
    password: str,
    smtp_server: str = "smtp.gmail.com",
    port: int = 587,
    language: str = "en",
):
    """
    Initialize email callback configuration.
    :param sender_email: SMTP account email address
    :param receiver_email: Recipient email address
    :param password: SMTP account password
    :param smtp_server: SMTP server address
    :param port: SMTP server port
    :param language: Email content language (en/zh)
    """
    # Store connection settings and message routing info on the instance.
    settings = {
        "sender_email": sender_email,
        "receiver_email": receiver_email,
        "password": password,
        "smtp_server": smtp_server,
        "port": port,
        "language": language,
    }
    for attr, value in settings.items():
        setattr(self, attr, value)
|
Initialize email callback configuration.
:param sender_email: SMTP account email address
:param receiver_email: Recipient email address
:param password: SMTP account password
:param smtp_server: SMTP server address
:param port: SMTP server port
:param language: Email content language (en/zh)
|
__init__
|
python
|
SwanHubX/SwanLab
|
swanlab/plugin/notification.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/plugin/notification.py
|
Apache-2.0
|
def _create_email_content(self, error: Optional[str] = None) -> "tuple[str, str]":
    """Generate bilingual email content based on experiment status.

    :param error: Error message when the experiment failed; ``None`` on success.
    :return: A ``(subject, body)`` tuple rendered from the language templates.

    Fix: the original annotation declared ``Dict[str, str]`` although the
    function returns a 2-tuple; the annotation now matches the actual return.
    """
    templates = self.DEFAULT_TEMPLATES[self.language]
    # Determine email subject and body based on error status
    if error:
        subject = templates["subject_error"]
        body = templates["body_error"].format(error=error)
    else:
        subject = templates["subject_success"]
        body = templates["body_success"]
    # Add experiment link if running in cloud mode
    exp_link = swanlab.get_url()
    if exp_link:
        body += templates["link_text"].format(
            project=self.project,
            workspace=self.workspace,
            exp_name=self.exp_name,
            description=self.description,
            link=exp_link,
        )
    return subject, body
|
Generate bilingual email content based on experiment status.
|
_create_email_content
|
python
|
SwanHubX/SwanLab
|
swanlab/plugin/notification.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/plugin/notification.py
|
Apache-2.0
|
def gen_sign(self, timesteamp: int) -> str:
    """
    docs: https://open.feishu.cn/document/client-docs/bot-v3/add-custom-bot?lang=zh-CN#9ff32e8e
    If the user has configured the signature verification function, this method is required to generate the signature
    """
    if not self.secret:
        raise ValueError("secret is required")
    # Feishu signs with "<timestamp>\n<secret>" as the HMAC key and an empty message.
    key = f"{timesteamp}\n{self.secret}".encode("utf-8")
    digest = hmac.new(key, digestmod=hashlib.sha256).digest()
    return base64.b64encode(digest).decode("utf-8")
|
docs: https://open.feishu.cn/document/client-docs/bot-v3/add-custom-bot?lang=zh-CN#9ff32e8e
If the user has configured the signature verification function, this method is required to generate the signature
|
gen_sign
|
python
|
SwanHubX/SwanLab
|
swanlab/plugin/notification.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/plugin/notification.py
|
Apache-2.0
|
def _initialize_dataframe(self):
    """Initialize the DataFrame based on file existence."""
    if not self.file_exists:
        return self._create_empty_dataframe()
    try:
        df = pd.read_csv(self.save_path)
    except pd.errors.EmptyDataError:
        # The file exists but has no parsable content at all.
        return self._create_empty_dataframe()
    # A zero-row CSV is treated the same as a missing file.
    return self._create_empty_dataframe() if df.empty else df
|
Initialize the DataFrame based on file existence.
|
_initialize_dataframe
|
python
|
SwanHubX/SwanLab
|
swanlab/plugin/writer.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/plugin/writer.py
|
Apache-2.0
|
def _create_empty_dataframe(self):
    """Create an empty DataFrame with default columns."""
    # Any caller reaching this point should treat the target as a brand-new file.
    self.file_exists = False
    default_columns = ["project", "exp_name", "description", "datetime", "run_id", "workspace", "logdir", "url"]
    return pd.DataFrame(columns=default_columns)
|
Create an empty DataFrame with default columns.
|
_create_empty_dataframe
|
python
|
SwanHubX/SwanLab
|
swanlab/plugin/writer.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/plugin/writer.py
|
Apache-2.0
|
def on_run(self, *args, **kwargs):
    """Handle actions to perform on run.

    Reads the active swanlab run, records its log directory and experiment
    URL, then appends a metadata row (and config columns) to the CSV file.
    """
    run = swanlab.get_run()
    config = run.config
    self.logdir = run.public.swanlog_dir
    timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    # Set experiment URL if available
    # NOTE(review): the condition tests `project_url` but the value read is
    # `experiment_url` — presumably a "running in cloud mode" check; confirm.
    self.experiment_url = run.public.cloud.experiment_url if run.public.cloud.project_url else None
    # Fixed metadata columns; config columns are appended by the handlers.
    headers = ["project", "exp_name", "description", "datetime", "run_id", "workspace", "logdir", "url"]
    row_data = [self.project, self.exp_name, self.description, timestamp, self.run_id, self.workspace, self.logdir, self.experiment_url]
    # Fresh files get headers written; existing files are appended/merged.
    if not self.file_exists:
        self._handle_new_file(config, headers, row_data)
    else:
        self._handle_existing_file(config, headers, row_data)
|
Handle actions to perform on run.
|
on_run
|
python
|
SwanHubX/SwanLab
|
swanlab/plugin/writer.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/plugin/writer.py
|
Apache-2.0
|
def _handle_existing_file(self, config, headers, row_data):
    """Handle writing to an existing file.

    Merges the run's config keys into the previously-seen "config/..."
    columns, appends one new row, and rewrites the CSV in place.
    """
    # NOTE: the incoming `headers` argument is immediately replaced by the
    # header list cached from the existing file.
    headers = self.original_headers.copy()
    # First 8 columns are fixed run metadata; the rest are config columns.
    headers_metadata = headers[:8]
    headers_config = [header for header in headers[8:] if header.startswith("config/")]
    # Placeholder " " keeps columns aligned for keys absent from this run.
    headers_config_dict = {key: {"value": " ", "index": headers_config.index(key)} for key in headers_config}
    for key in config:
        if "config/" + key not in headers_config_dict:
            # New config key: append a brand-new column at the end.
            headers_config_dict["config/" + key] = {"value": config[key], "index": len(headers_config)}
            headers_config.append("config/" + key)
        else:
            headers_config_dict["config/" + key]["value"] = config[key]
    # Values ordered to match the (possibly extended) config column order.
    headers_config_list = [headers_config_dict[header]["value"] for header in headers_config]
    headers = headers_metadata + headers_config
    row_data = row_data + headers_config_list
    new_row_df = pd.DataFrame([row_data], columns=headers)
    updated_df = pd.concat([self.df, new_row_df], ignore_index=True)
    updated_df.to_csv(self.save_path, index=False)
    self.headers = headers
    self.last_row_data = row_data
|
Handle writing to an existing file.
|
_handle_existing_file
|
python
|
SwanHubX/SwanLab
|
swanlab/plugin/writer.py
|
https://github.com/SwanHubX/SwanLab/blob/master/swanlab/plugin/writer.py
|
Apache-2.0
|
def test_from_mol(self, mol):
    """Tests creating a Molecule from an RDKit Mol object."""
    # Skip silently when RDKit could not produce a Mol fixture.
    if not mol:
        return
    molecule = Molecule.from_mol(mol, caption="Ethanol")
    assert molecule.caption == "Ethanol"
    assert isinstance(molecule.pdb_data, str)
|
Tests creating a Molecule from an RDKit Mol object.
|
test_from_mol
|
python
|
SwanHubX/SwanLab
|
test/unit/data/modules/object3d/test_molecule.py
|
https://github.com/SwanHubX/SwanLab/blob/master/test/unit/data/modules/object3d/test_molecule.py
|
Apache-2.0
|
def test_from_pdb_file(self, pdb_file):
    """Tests creating a Molecule from a PDB file."""
    molecule = Molecule.from_pdb_file(pdb_file, caption="Test PDB")
    assert molecule.caption == "Test PDB"
    assert isinstance(molecule.pdb_data, str)
    # The stored PDB text must match the file contents exactly.
    with open(pdb_file) as handle:
        expected = handle.read()
    assert molecule.pdb_data == expected
|
Tests creating a Molecule from a PDB file.
|
test_from_pdb_file
|
python
|
SwanHubX/SwanLab
|
test/unit/data/modules/object3d/test_molecule.py
|
https://github.com/SwanHubX/SwanLab/blob/master/test/unit/data/modules/object3d/test_molecule.py
|
Apache-2.0
|
def test_from_sdf_file(self, sdf_file):
    """Tests creating a Molecule from an SDF file."""
    caption = "Test SDF"
    molecule = Molecule.from_sdf_file(sdf_file, caption=caption)
    assert molecule.caption == caption
    assert isinstance(molecule.pdb_data, str)
|
Tests creating a Molecule from an SDF file.
|
test_from_sdf_file
|
python
|
SwanHubX/SwanLab
|
test/unit/data/modules/object3d/test_molecule.py
|
https://github.com/SwanHubX/SwanLab/blob/master/test/unit/data/modules/object3d/test_molecule.py
|
Apache-2.0
|
def test_from_mol_file(self, mol_file):
    """Tests creating a Molecule from a Mol file."""
    caption = "Test Mol"
    molecule = Molecule.from_mol_file(mol_file, caption=caption)
    assert molecule.caption == caption
    assert isinstance(molecule.pdb_data, str)
|
Tests creating a Molecule from a Mol file.
|
test_from_mol_file
|
python
|
SwanHubX/SwanLab
|
test/unit/data/modules/object3d/test_molecule.py
|
https://github.com/SwanHubX/SwanLab/blob/master/test/unit/data/modules/object3d/test_molecule.py
|
Apache-2.0
|
def test_from_smiles(self):
    """Tests creating a Molecule from a SMILES string."""
    caption = "Test SMILES"
    molecule = Molecule.from_smiles("CCO", caption=caption)
    assert molecule.caption == caption
    assert isinstance(molecule.pdb_data, str)
|
Tests creating a Molecule from a SMILES string.
|
test_from_smiles
|
python
|
SwanHubX/SwanLab
|
test/unit/data/modules/object3d/test_molecule.py
|
https://github.com/SwanHubX/SwanLab/blob/master/test/unit/data/modules/object3d/test_molecule.py
|
Apache-2.0
|
def enum_blocks_static(instructions):
    """
    Return a list of basicblock after
    statically parsing given instructions

    A new block starts after any jump, conditional jump, halt, or right
    before a JUMPDEST; the final instruction closes the last block.
    """
    basicblocks = list()
    index = 0
    # create the first block
    new_block = False
    end_block = False
    block = BasicBlock(instructions[0].offset,
                       instructions[0],
                       name='block_%x' % instructions[0].offset)
    for inst in instructions:
        # Open a fresh block when the previous instruction closed one.
        if new_block:
            block = BasicBlock(inst.offset,
                               inst,
                               name='block_%x' % inst.offset)
            new_block = False
        # add current instruction to the basicblock
        block.instructions.append(inst)
        # absolute JUMP
        if inst.is_branch_unconditional:
            new_block = True
        # conditionnal JUMPI
        elif inst.is_branch_conditional:
            new_block = True
        # Halt instruction : RETURN, STOP, ...
        elif inst.is_halt:  # and inst != instructions[-1]:
            new_block = True
        # just falls to the next instruction
        elif inst != instructions[-1] and \
                instructions[index + 1].name == 'JUMPDEST':
            new_block = True
        # last instruction of the entire bytecode
        elif inst == instructions[-1]:
            end_block = True
        # Close and record the current block at any terminator.
        if new_block or end_block:
            block.end_offset = inst.offset_end
            block.end_instr = inst
            basicblocks.append(block)
            new_block = True
            end_block = False
        index += 1
    return basicblocks
|
Return a list of basicblock after
statically parsing given instructions
|
enum_blocks_static
|
python
|
FuzzingLabs/octopus
|
octopus/arch/evm/cfg.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/arch/evm/cfg.py
|
MIT
|
def runtime_code_detector(self):
    '''Check for presence of runtime code
    '''
    # A second occurrence of the "60xx604052" prologue marks where the
    # deployed (runtime) code begins, after the loader/constructor code.
    matches = list(re.finditer('60.{2}604052', self.bytecode))
    if len(matches) <= 1:
        return
    split_at = matches[1].start()
    logging.info("[+] Runtime code detected")
    self.loader_code = self.bytecode[:split_at]
    self.bytecode = self.bytecode[split_at:]
|
Check for presence of runtime code
|
runtime_code_detector
|
python
|
FuzzingLabs/octopus
|
octopus/arch/evm/disassembler.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/arch/evm/disassembler.py
|
MIT
|
def swarm_hash_detector(self):
    '''Check for presence of Swarm hash at the end of bytecode
    https://github.com/ethereum/wiki/wiki/Swarm-Hash
    '''
    # bzzr == 0x65627a7a72; solc appends "a165627a7a7230...0029" metadata.
    matches = list(re.finditer('a165627a7a7230.*0029', self.bytecode))
    if not matches:
        return
    start, end = matches[-1].start(), matches[-1].end()
    if start <= 0:
        return
    logging.info("[+] Swarm hash detected in bytecode")
    self.swarm_hash = self.bytecode[start:end]
    logging.info("[+] Swarm hash value: 0x%s", self.swarm_hash)
    # Anything after the swarm hash is constructor arguments.
    if end != len(self.bytecode):
        self.constructor_args = self.bytecode[end:]
        logging.info("[+] Constructor arguments detected in bytecode")
        logging.info("[+] Constructor arguments removed from bytecode")
    logging.info("[+] Swarm hash removed from bytecode")
    self.bytecode = self.bytecode[:start]
|
Check for presence of Swarm hash at the end of bytecode
https://github.com/ethereum/wiki/wiki/Swarm-Hash
|
swarm_hash_detector
|
python
|
FuzzingLabs/octopus
|
octopus/arch/evm/disassembler.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/arch/evm/disassembler.py
|
MIT
|
def disassemble(self, bytecode=None, offset=0, r_format='list',
                analysis=True):
    '''
    creation code remove if analysis param is set to True (default)
    r_format: ('list' | 'text' | 'reverse')
    '''
    # Keep the previously-loaded bytecode unless a new one is supplied.
    if bytecode:
        self.bytecode = bytecode
    if analysis:
        self.analysis()
    # Drop any results from a previous disassembly pass.
    self.instructions = list()
    self.reverse_instructions = dict()
    # Delegate the actual decoding to the generic Disassembler.
    return super().disassemble(self.bytecode, offset, r_format)
|
creation code remove if analysis param is set to True (default)
r_format: ('list' | 'text' | 'reverse')
|
disassemble
|
python
|
FuzzingLabs/octopus
|
octopus/arch/evm/disassembler.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/arch/evm/disassembler.py
|
MIT
|
def emul_sha3_instruction(self, instr, state):
    '''Symbolic execution of SHA3 group of opcode'''
    # Pop the two operands from the symbolic (SSA) stack, top first.
    first_arg = state.ssa_stack.pop()
    second_arg = state.ssa_stack.pop()
    instr.ssa = SSA(self.ssa_counter, instr.name, args=[first_arg, second_arg])
    # The instruction itself stands for the value it produces.
    state.ssa_stack.append(instr)
    self.ssa_counter += 1
|
Symbolic execution of SHA3 group of opcode
|
emul_sha3_instruction
|
python
|
FuzzingLabs/octopus
|
octopus/arch/evm/emulator.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/arch/evm/emulator.py
|
MIT
|
def _get_reverse_table(self):
    """Build an internal table used in the assembler."""
    # Re-key the opcode table by mnemonic so the assembler can look up by name.
    return {
        mnemonic: (opcode, mnemonic, immediate_operand_size,
                   pops, pushes, gas, description)
        for opcode, (mnemonic, immediate_operand_size,
                     pops, pushes, gas, description) in _table.items()
    }
|
Build an internal table used in the assembler.
|
_get_reverse_table
|
python
|
FuzzingLabs/octopus
|
octopus/arch/evm/evm.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/arch/evm/evm.py
|
MIT
|
def group(self):
    '''Instruction classification as per the yellow paper'''
    # The high nibble of the opcode selects its yellow-paper category.
    categories = {
        0x0: 'Stop and Arithmetic Operations',
        0x1: 'Comparison & Bitwise Logic Operations',
        0x2: 'SHA3',
        0x3: 'Environmental Information',
        0x4: 'Block Information',
        0x5: 'Stack, Memory, Storage and Flow Operations',
        0x6: 'Push Operations',
        0x7: 'Push Operations',
        0x8: 'Duplication Operations',
        0x9: 'Exchange Operations',
        0xa: 'Logging Operations',
        0xf: 'System operations',
    }
    return categories.get(self.opcode >> 4, 'Invalid instruction')
|
Instruction classification as per the yellow paper
|
group
|
python
|
FuzzingLabs/octopus
|
octopus/arch/evm/instruction.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/arch/evm/instruction.py
|
MIT
|
def __decode_header(self, header, h_data):
    """Decode wasm header
    Return tuple (magic, version) of wasm module header
    .. seealso:: https://github.com/WebAssembly/design/blob/master/BinaryEncoding.md#high-level-structure
    """
    # Serialize both header integers as little-endian bytes of their declared size.
    magic = h_data.magic.to_bytes(header.magic.byte_size, 'little')
    version = h_data.version.to_bytes(header.version.byte_size, 'little')
    return (magic, version)
|
Decode wasm header
Return tuple (magic, version) of wasm module header
.. seealso:: https://github.com/WebAssembly/design/blob/master/BinaryEncoding.md#high-level-structure
|
__decode_header
|
python
|
FuzzingLabs/octopus
|
octopus/arch/wasm/analyzer.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/arch/wasm/analyzer.py
|
MIT
|
def __decode_type_section(self, type_section):
    """Decode wasm type section
    Return a list of tuple (param_str, return_str)
    .. seealso:: https://github.com/WebAssembly/design/blob/master/BinaryEncoding.md#type-section
    """
    type_list = []
    for entry in type_section.payload.entries:
        # Space-separated textual parameter types, e.g. "i32 i64".
        param_str = ' '.join(LANG_TYPE.get(_x) for _x in entry.param_types)
        return_str = '%s' % LANG_TYPE.get(entry.return_type) if entry.return_type else ''
        type_list.append((param_str, return_str))
    return type_list
|
Decode wasm type section
Return a list of tuple (param_str, return_str)
.. seealso:: https://github.com/WebAssembly/design/blob/master/BinaryEncoding.md#type-section
|
__decode_type_section
|
python
|
FuzzingLabs/octopus
|
octopus/arch/wasm/analyzer.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/arch/wasm/analyzer.py
|
MIT
|
def __decode_import_section(self, import_section):
    """Decode import section to tuple of list

    Returns ``(import_list, import_func_list)`` where import_list covers all
    import kinds and import_func_list only function imports.
    .. seealso:: https://github.com/WebAssembly/design/blob/master/BinaryEncoding.md#import-section
    """
    entries = import_section.payload.entries
    import_list = []
    import_func_list = []
    for idx, entry in enumerate(entries):
        # for encoding in ('utf-8', 'utf-16-be'):
        #     value = str(v)
        #     try:
        #         value = v.decode(encoding)
        #         break
        #     except UnicodeDecodeError:
        #         value = str(v)
        # Names are UTF-8 in well-formed modules; keep raw bytes otherwise.
        try:
            module_str = entry.module_str.tobytes().decode('utf-8')
        except UnicodeDecodeError:
            module_str = entry.module_str.tobytes()
        try:
            field_str = entry.field_str.tobytes().decode('utf-8')
        except UnicodeDecodeError:
            field_str = entry.field_str.tobytes()
        logging.debug('%s %s', module_str, field_str)
        # Dispatch on the import kind (function/table/memory/global).
        kind_type = KIND_TYPE.get(entry.kind)
        if kind_type == 'function':
            f_type = format_kind_function(entry.type.type)
            import_list.append((entry.kind, module_str, field_str, f_type))
            # add also the info into the specific import function list
            import_func_list.append((module_str, field_str, f_type))
        elif kind_type == 'table':
            tabl = format_kind_table(entry.type.element_type,
                                     entry.type.limits.flags,
                                     entry.type.limits.initial,
                                     entry.type.limits.maximum)
            import_list.append((entry.kind, module_str, field_str, tabl))
        elif kind_type == 'memory':
            mem = format_kind_memory(entry.type.limits.flags,
                                     entry.type.limits.initial,
                                     entry.type.limits.maximum)
            import_list.append((entry.kind, module_str, field_str, mem))
        elif kind_type == 'global':
            gbl = format_kind_global(entry.type.content_type,
                                     entry.type.mutability)
            import_list.append((entry.kind, module_str, field_str, gbl))
        else:
            logging.error('unknown %d %s %s', entry.kind,
                          module_str, field_str)
    return (import_list, import_func_list)
|
Decode import section to tuple of list
.. seealso:: https://github.com/WebAssembly/design/blob/master/BinaryEncoding.md#import-section
|
__decode_import_section
|
python
|
FuzzingLabs/octopus
|
octopus/arch/wasm/analyzer.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/arch/wasm/analyzer.py
|
MIT
|
def __decode_table_section(self, table_section):
    """
    .. seealso:: https://github.com/WebAssembly/design/blob/master/BinaryEncoding.md#table-section
    """
    # In the MVP there is at most one table entry.
    table_list = []
    for entry in table_section.payload.entries:
        limits = entry.limits
        table_list.append(format_kind_table(entry.element_type,
                                            limits.flags,
                                            limits.initial,
                                            limits.maximum))
    return table_list
|
.. seealso:: https://github.com/WebAssembly/design/blob/master/BinaryEncoding.md#table-section
|
__decode_table_section
|
python
|
FuzzingLabs/octopus
|
octopus/arch/wasm/analyzer.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/arch/wasm/analyzer.py
|
MIT
|
def __decode_memory_section(self, memory_section):
    """
    .. seealso:: https://github.com/WebAssembly/design/blob/master/BinaryEncoding.md#memory-section
    """
    # In the MVP there is at most one linear memory entry.
    return [
        format_kind_memory(entry.limits.flags,
                           entry.limits.initial,
                           entry.limits.maximum)
        for entry in memory_section.payload.entries
    ]
|
.. seealso:: https://github.com/WebAssembly/design/blob/master/BinaryEncoding.md#memory-section
|
__decode_memory_section
|
python
|
FuzzingLabs/octopus
|
octopus/arch/wasm/analyzer.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/arch/wasm/analyzer.py
|
MIT
|
def __decode_global_section(self, global_section):
    """
    .. seealso:: https://github.com/WebAssembly/design/blob/master/BinaryEncoding.md#global-section
    """
    globals_l = list()
    for entry in global_section.payload.globals:
        # NOTE(review): arguments here are (mutability, content_type), while
        # __decode_import_section calls format_kind_global(content_type,
        # mutability) — one call site is likely swapped; confirm against
        # format_kind_global's signature.
        fmt = format_kind_global(entry.type.mutability,
                                 entry.type.content_type)
        globals_l.append(fmt)
    return globals_l
|
.. seealso:: https://github.com/WebAssembly/design/blob/master/BinaryEncoding.md#global-section
|
__decode_global_section
|
python
|
FuzzingLabs/octopus
|
octopus/arch/wasm/analyzer.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/arch/wasm/analyzer.py
|
MIT
|
def __decode_export_section(self, export_section):
    """
    .. seealso:: https://github.com/WebAssembly/design/blob/master/BinaryEncoding.md#export-section
    """
    export_list = []
    for entry in export_section.payload.entries:
        # field_str is the exported name; fall back to raw bytes when it is
        # not valid UTF-8.
        raw_name = entry.field_str.tobytes()
        try:
            field_str = raw_name.decode('utf-8')
        except UnicodeDecodeError:
            field_str = raw_name
        export_list.append({'field_str': field_str,
                            'kind': entry.kind,
                            'index': entry.index})
    return export_list
|
.. seealso:: https://github.com/WebAssembly/design/blob/master/BinaryEncoding.md#export-section
|
__decode_export_section
|
python
|
FuzzingLabs/octopus
|
octopus/arch/wasm/analyzer.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/arch/wasm/analyzer.py
|
MIT
|
def __decode_element_section(self, element_section):
    """
    .. seealso:: https://github.com/WebAssembly/design/blob/master/BinaryEncoding.md#element-section
    """
    # One dict per element segment, preserving declaration order.
    return [
        {'index': entry.index, 'offset': entry.offset, 'elems': entry.elems}
        for entry in element_section.payload.entries
    ]
|
.. seealso:: https://github.com/WebAssembly/design/blob/master/BinaryEncoding.md#element-section
|
__decode_element_section
|
python
|
FuzzingLabs/octopus
|
octopus/arch/wasm/analyzer.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/arch/wasm/analyzer.py
|
MIT
|
def __decode_code_section(self, code_section):
    """
    .. seealso:: https://github.com/WebAssembly/design/blob/master/BinaryEncoding.md#code-section
    """
    # One raw bytestring per function body, in declaration order.
    return [entry.code.tobytes() for entry in code_section.payload.bodies]
|
.. seealso:: https://github.com/WebAssembly/design/blob/master/BinaryEncoding.md#code-section
|
__decode_code_section
|
python
|
FuzzingLabs/octopus
|
octopus/arch/wasm/analyzer.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/arch/wasm/analyzer.py
|
MIT
|
def __decode_data_section(self, data_section):
    """
    .. seealso:: https://github.com/WebAssembly/design/blob/master/BinaryEncoding.md#data-section
    """
    # One dict per data segment; payload bytes are materialized eagerly.
    return [
        {'index': entry.index,
         'offset': entry.offset,
         'size': entry.size,
         'data': entry.data.tobytes()}
        for entry in data_section.payload.entries
    ]
|
.. seealso:: https://github.com/WebAssembly/design/blob/master/BinaryEncoding.md#data-section
|
__decode_data_section
|
python
|
FuzzingLabs/octopus
|
octopus/arch/wasm/analyzer.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/arch/wasm/analyzer.py
|
MIT
|
def __decode_name_section(self, name_subsection):
    """Decode one subsection of the custom "name" section.

    Returns a list of (index, name_len, name_str) tuples for function-name
    subsections; other subsection types are not implemented yet.
    .. seealso:: https://github.com/WebAssembly/design/blob/master/BinaryEncoding.md#name-section
    """
    names_list = list()
    if name_subsection.name_type == NAME_SUBSEC_FUNCTION:
        subsection_function = name_subsection.payload
        for name in subsection_function.names:
            # Names are UTF-8 in well-formed modules; keep raw bytes otherwise.
            try:
                name_str = name.name_str.tobytes().decode('utf-8')
            except UnicodeDecodeError:
                name_str = name.name_str.tobytes()
            names_list.append((name.index, name.name_len, name_str))
    elif name_subsection.name_type == NAME_SUBSEC_LOCAL:
        # Consistency fix: report through logging (like the rest of this
        # module) instead of printing to stdout.
        logging.warning("__decode_name_section NAME_SUBSEC_LOCAL not implemented")
    else:
        logging.warning("__decode_name_section name_type unknown")
    return names_list
|
.. seealso:: https://github.com/WebAssembly/design/blob/master/BinaryEncoding.md#name-section
|
__decode_name_section
|
python
|
FuzzingLabs/octopus
|
octopus/arch/wasm/analyzer.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/arch/wasm/analyzer.py
|
MIT
|
def __decode_unknown_section(self, unknown_section):
    """
    .. seealso:: https://github.com/WebAssembly/design/blob/master/BinaryEncoding.md#high-level-structure
    """
    # Unknown/custom sections are preserved verbatim as (name, payload) bytes.
    return (unknown_section.name.tobytes(),
            unknown_section.payload.tobytes())
|
.. seealso:: https://github.com/WebAssembly/design/blob/master/BinaryEncoding.md#high-level-structure
|
__decode_unknown_section
|
python
|
FuzzingLabs/octopus
|
octopus/arch/wasm/analyzer.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/arch/wasm/analyzer.py
|
MIT
|
def analyze(self):
    """Analyse the complete module and extract information per section."""
    # src: https://github.com/WebAssembly/design/blob/master/BinaryEncoding.md
    # custom    0   name, .debug_str, ...
    # Type      1   Function signature declarations
    # Import    2   Import declarations
    # Function  3   Function declarations
    # Table     4   Indirect function table and other tables
    # Memory    5   Memory attributes
    # Global    6   Global declarations
    # Export    7   Exports
    # Start     8   Start function declaration
    # Element   9   Elements section
    # Code     10   Function bodies (code)
    # Data     11   Data segments
    # reset attributes
    self.attributes_reset()
    mod_iter = iter(decode_module(self.module_bytecode, True))
    # decode header version - usefull in the future (multiple versions)
    header, header_data = next(mod_iter)
    self.magic, self.version = self.__decode_header(header, header_data)
    #
    # Wasm sections
    #
    # Dispatch each remaining section to its dedicated decoder based on the
    # payload type declared by the section's decoder metadata.
    for cur_sec, cur_sec_data in mod_iter:
        sec = cur_sec_data.get_decoder_meta()['types']['payload']
        if isinstance(sec, TypeSection):
            self.types = self.__decode_type_section(cur_sec_data)
        elif isinstance(sec, ImportSection):
            self.imports_all, self.imports_func = \
                self.__decode_import_section(cur_sec_data)
        elif isinstance(sec, FunctionSection):
            self.func_types = self.__decode_function_section(cur_sec_data)
        elif isinstance(sec, TableSection):
            self.tables = self.__decode_table_section(cur_sec_data)
        elif isinstance(sec, MemorySection):
            self.memories = self.__decode_memory_section(cur_sec_data)
        elif isinstance(sec, GlobalSection):
            # TODO not analyzed
            self.globals = self.__decode_global_section(cur_sec_data)
        elif isinstance(sec, ExportSection):
            self.exports = self.__decode_export_section(cur_sec_data)
        elif isinstance(sec, StartSection):
            # TODO not analyzed
            self.start = self.__decode_start_section(cur_sec_data)
        elif isinstance(sec, ElementSection):
            self.elements = self.__decode_element_section(cur_sec_data)
        elif isinstance(sec, CodeSection):
            self.codes = self.__decode_code_section(cur_sec_data)
        elif isinstance(sec, DataSection):
            self.datas = self.__decode_data_section(cur_sec_data)
        # name section
        elif isinstance(cur_sec, NameSubSection):
            self.names = self.__decode_name_section(cur_sec_data)
        else:
            # Anything unrecognized is kept verbatim as a custom section.
            self.customs.append(self.__decode_unknown_section(cur_sec_data))
    # create ordered list of functions
    self.func_prototypes = self.get_func_prototypes_ordered()
    return True
|
analyse the complete module & extract informations
|
analyze
|
python
|
FuzzingLabs/octopus
|
octopus/arch/wasm/analyzer.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/arch/wasm/analyzer.py
|
MIT
|
def enum_func(module_bytecode):
    ''' return a list of Function
    see:: octopus.core.function
    '''
    analyzer = WasmModuleAnalyzer(module_bytecode)
    protos = analyzer.func_prototypes
    import_len = len(analyzer.imports_func)
    functions = list()
    for idx, code in enumerate(analyzer.codes):
        # Imported functions occupy the first prototype slots; local bodies
        # follow, hence the import_len offset.
        name, param_str, return_str, _ = protos[import_len + idx]
        prefered_name = format_func_name(name, param_str, return_str)
        instructions = WasmDisassembler().disassemble(code)
        func = Function(0, instructions[0], name=name,
                        prefered_name=prefered_name)
        func.instructions = instructions
        functions.append(func)
    return functions
|
return a list of Function
see:: octopus.core.function
|
enum_func
|
python
|
FuzzingLabs/octopus
|
octopus/arch/wasm/cfg.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/arch/wasm/cfg.py
|
MIT
|
def enum_func_name_call_indirect(functions):
    ''' return a list of function name if they used call_indirect
    '''
    # Deduplicated names of every function containing a call_indirect.
    seen = {
        func.name
        for func in functions
        for inst in func.instructions
        if inst.name == "call_indirect"
    }
    return list(seen)
|
return a list of function name if they used call_indirect
|
enum_func_name_call_indirect
|
python
|
FuzzingLabs/octopus
|
octopus/arch/wasm/cfg.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/arch/wasm/cfg.py
|
MIT
|
def enum_blocks_edges(function_id, instructions):
    """
    Return a list of basicblock after
    statically parsing given instructions

    Works in four passes: (1) map nesting of block/loop/if constructs,
    (2) resolve branch labels to absolute jump targets (xrefs),
    (3) split the instruction stream into basic blocks, (4) derive edges.
    """
    basicblocks = list()
    edges = list()
    branches = []
    xrefs = []
    intent = 0
    blocks_tmp = []
    blocks_list = []
    # we need to do that because jump label are relative to the current block index
    for index, inst in enumerate(instructions[:-1]):
        if inst.is_block_terminator:
            start, name = blocks_tmp.pop()
            if inst.name == 'else':
                end = inst.offset - 1
            else:
                end = inst.offset_end
            blocks_list.append((intent, start, end, name))
            intent -= 1
        if inst.is_block_starter:  # in ['block', 'loop']:
            blocks_tmp.append((inst.offset, inst.name))
            intent += 1
        if inst.is_branch:
            branches.append((intent, inst))
    # add function body end
    blocks_list.append((0, 0, instructions[-1].offset_end, 'func'))
    blocks_list = sorted(blocks_list, key=lambda tup: (tup[1], tup[0]))
    # Resolve each branch's relative label depth to an absolute target offset.
    for depth, inst in branches:
        labl = list()
        if inst.name == 'br_table':
            labl = [i for i in inst.insn_byte[2:]]
        else:
            labl.append(int(inst.operand_interpretation.split(' ')[-1]))
        for d2 in labl:
            # Find the enclosing construct the label refers to.
            rep = next(((i, s, e, n) for i, s, e, n in blocks_list if (i == (depth - d2) and s < inst.offset and e > inst.offset_end)), None)
            if rep:
                i, start, end, name = rep
                # if we branch to a 'loop' label
                # we go at the entry of the 'loop' block
                if name == 'loop':
                    value = start
                # if we branch to a 'block' label
                # we go at the end of the "block" block
                elif name == 'block' or name == 'func':
                    value = end
                # we don't know
                else:
                    value = None
                inst.xref.append(value)
                xrefs.append(value)
    # assign xref for "if" branch
    # needed because 'if' don't used label
    for index, inst in enumerate(instructions[:-1]):
        if inst.name == 'if':
            g_block = next(iter([b for b in blocks_list if b[1] == inst.offset]), None)
            jump_target = g_block[2] + 1
            inst.xref.append(jump_target)
            xrefs.append(jump_target)
        elif inst.name == 'else':
            g_block = next(iter([b for b in blocks_list if b[1] == inst.offset]), None)
            jump_target = g_block[2] + 1
            inst.xref.append(jump_target)
            xrefs.append(jump_target)
    # enumerate blocks
    new_block = True
    for index, inst in enumerate(instructions):
        # creation of a block
        if new_block:
            block = BasicBlock(inst.offset,
                               inst,
                               name=format_bb_name(function_id, inst.offset))
            new_block = False
        # add current instruction to the basicblock
        block.instructions.append(inst)
        # next instruction is a jump target
        if index < (len(instructions) - 1) and \
                instructions[index + 1].offset in xrefs:
            new_block = True
        # absolute jump - br
        elif inst.is_branch_unconditional:
            new_block = True
        # conditionnal jump - br_if
        elif inst.is_branch_conditional:
            new_block = True
        # is_block_terminator
        # GRAPHICAL OPTIMIZATION: merge end together
        elif index < (len(instructions) - 1) and \
                instructions[index + 1].name in ['else', 'loop']:  # is_block_terminator
            new_block = True
        # last instruction of the bytecode
        elif inst.offset == instructions[-1].offset:
            new_block = True
        if new_block:
            block.end_offset = inst.offset_end
            block.end_instr = inst
            basicblocks.append(block)
            new_block = True
    # enumerate edges
    for index, block in enumerate(basicblocks):
        # get the last instruction
        inst = block.end_instr
        # unconditional jump - br
        if inst.is_branch_unconditional:
            for ref in inst.xref:
                edges.append(Edge(block.name, format_bb_name(function_id, ref), EDGE_UNCONDITIONAL))
        # conditionnal jump - br_if, if
        elif inst.is_branch_conditional:
            if inst.name == 'if':
                edges.append(Edge(block.name,
                                  format_bb_name(function_id, inst.offset_end + 1),
                                  EDGE_CONDITIONAL_TRUE))
                if_b = next(iter([b for b in blocks_list if b[1] == inst.offset]), None)
                #else_block = blocks_list[blocks_list.index(if_block) + 1]
                jump_target = if_b[2] + 1
                edges.append(Edge(block.name,
                                  format_bb_name(function_id, jump_target),
                                  EDGE_CONDITIONAL_FALSE))
            else:
                for ref in inst.xref:
                    if ref and ref != inst.offset_end + 1:
                        # create conditionnal true edges
                        edges.append(Edge(block.name,
                                          format_bb_name(function_id, ref),
                                          EDGE_CONDITIONAL_TRUE))
                # create conditionnal false edge
                edges.append(Edge(block.name,
                                  format_bb_name(function_id, inst.offset_end + 1),
                                  EDGE_CONDITIONAL_FALSE))
        # instruction that end the flow
        elif [i.name for i in block.instructions if i.is_halt]:
            pass
        elif inst.is_halt:
            pass
        # handle the case when you have if and else following
        elif inst.offset != instructions[-1].offset and \
                block.start_instr.name != 'else' and \
                instructions[instructions.index(inst) + 1].name == 'else':
            else_ins = instructions[instructions.index(inst) + 1]
            else_b = next(iter([b for b in blocks_list if b[1] == else_ins.offset]), None)
            edges.append(Edge(block.name, format_bb_name(function_id, else_b[2] + 1), EDGE_FALLTHROUGH))
        # add the last intruction "end" in the last block
        elif inst.offset != instructions[-1].offset:
            # EDGE_FALLTHROUGH
            edges.append(Edge(block.name, format_bb_name(function_id, inst.offset_end + 1), EDGE_FALLTHROUGH))
    # prevent duplicate edges
    edges = list(set(edges))
    return basicblocks, edges
|
Return a list of basic blocks after
statically parsing the given instructions
enum_blocks_edges
|
python
|
FuzzingLabs/octopus
|
octopus/arch/wasm/cfg.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/arch/wasm/cfg.py
|
MIT
|
def visualize(self, function=True, simplify=False, ssa=False):
    """Visualize the cfg.

    Uses CFGGraph; equivalent to::

        graph = CFGGraph(cfg)
        graph.view_functions()

    :param function: render one graph per function instead of a single view
    :param simplify: forwarded to the CFGGraph view call
    :param ssa: forwarded to the CFGGraph view call
    """
    renderer = CFGGraph(self)
    # pick the per-function or whole-program renderer, then invoke it
    view = renderer.view_functions if function else renderer.view
    view(simplify=simplify, ssa=ssa)
|
Visualize the cfg
used CFGGraph
equivalent to:
graph = CFGGraph(cfg)
graph.view_functions()
|
visualize
|
python
|
FuzzingLabs/octopus
|
octopus/arch/wasm/cfg.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/arch/wasm/cfg.py
|
MIT
|
def visualize_call_flow(self, filename="wasm_call_graph_octopus.gv",
                        format_fname=False):
    """Visualize the cfg call flow graph.

    Renders a left-to-right graphviz digraph of inter-function calls
    and opens the rendered output.

    :param filename: output path for the graphviz source / rendering
    :param format_fname: if True, label nodes with long function names
    """
    nodes, edges = self.get_functions_call_edges()
    if format_fname:
        # re-run extraction to also obtain the long display names
        nodes_longname, edges = self.get_functions_call_edges(format_fname=True)
    g = Digraph(filename, filename=filename)
    g.attr(rankdir='LR')
    with g.subgraph(name='global') as c:
        # classify functions so their nodes can be styled differently
        export_list = [p[0] for p in self.analyzer.func_prototypes if p[3] == 'export']
        import_list = [p[0] for p in self.analyzer.func_prototypes if p[3] == 'import']
        call_indirect_list = enum_func_name_call_indirect(self.functions)
        try:
            # functions reachable through the indirect-call table (element section 0)
            indirect_target = [self.analyzer.func_prototypes[index][0] for index in self.analyzer.elements[0].get('elems')]
        except IndexError:
            # no element section: nothing is an indirect-call target
            indirect_target = []
        # create all the graph nodes (function name)
        for idx, node in enumerate(nodes):
            # name graph bubble
            node_name = node
            if format_fname:
                node_name = nodes_longname[idx]
            # default style value
            fillcolor = "white"
            shape = "ellipse"
            style = "filled"
            if node in import_list:
                logging.debug('import ' + node)
                fillcolor = DESIGN_IMPORT.get('fillcolor')
                shape = DESIGN_IMPORT.get('shape')
                style = DESIGN_IMPORT.get('style')
                c.node(node_name, fillcolor=fillcolor, shape=shape, style=style)
            elif node in export_list:
                logging.debug('export ' + node)
                fillcolor = DESIGN_EXPORT.get('fillcolor')
                shape = DESIGN_EXPORT.get('shape')
                style = DESIGN_EXPORT.get('style')
                c.node(node_name, fillcolor=fillcolor, shape=shape, style=style)
            if node in indirect_target:
                logging.debug('indirect_target ' + node)
                shape = "hexagon"
            if node in call_indirect_list:
                logging.debug('contain call_indirect ' + node)
                style = "dashed"
            # NOTE(review): import/export nodes are declared twice; graphviz
            # merges duplicate node ids, so this second call's attributes
            # (possibly hexagon/dashed) presumably take precedence — confirm.
            c.node(node_name, fillcolor=fillcolor, shape=shape, style=style)
        # check if multiple same edges
        # in that case, put the number into label
        edges_counter = dict((x, edges.count(x)) for x in set(edges))
        # insert edges on the graph
        for edge, count in edges_counter.items():
            label = None
            if count > 1:
                label = str(count)
            c.edge(edge.node_from, edge.node_to, label=label)
    g.render(filename, view=True)
|
Visualize the cfg call flow graph
|
visualize_call_flow
|
python
|
FuzzingLabs/octopus
|
octopus/arch/wasm/cfg.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/arch/wasm/cfg.py
|
MIT
|
def visualize_instrs_per_funcs(self, show=True, save=True,
                               out_filename="wasm_func_analytic.png",
                               fontsize=8):
    """Visualize the instructions repartition per function.

    Draws a horizontal stacked bar chart: one bar per function and one
    colored segment per instruction group.

    :param show: display the chart interactively
    :param save: write the chart to ``out_filename``
    :param out_filename: output image path
    :param fontsize: font size of the function-name tick labels
    """
    import numpy as np
    import matplotlib.pyplot as plt
    final = list()
    datas = list()
    # y-axis tick labels - function names
    group_names = tuple(func.name for func in self.functions)
    # one bar position per function
    ind = [x for x, _ in enumerate(self.functions)]
    # list all instruction groups
    all_groups = [v for _, v in _groups.items()]
    # datas[f][g] = number of instructions of group g in function f
    for func in self.functions:
        group = [i.group for i in func.instructions]
        datas.append(tuple(group.count(g) for g in all_groups))
    # transpose: final[g] = per-function counts for group g
    for idx in range(len(all_groups)):
        final.append(tuple(x[idx] for x in datas))
    # choice color: https://matplotlib.org/users/colormaps.html
    color = iter(plt.cm.gist_rainbow(np.linspace(0, 1, len(all_groups))))
    # running left-offset of each function's stacked bar.
    # BUGFIX: was `np.array([0 * len(all_groups)])`, i.e. the single-element
    # array([0]) that only worked through broadcasting; use one explicit
    # zero per function as intended.
    stack = np.array([0] * len(self.functions))
    for idx in range(len(all_groups)):
        if idx == 0:
            # first segment starts at the axis
            plt.barh(ind, final[idx], label=all_groups[idx],
                     align='center', color=next(color))
        else:
            # subsequent segments start where the previous ones ended
            plt.barh(ind, final[idx], label=all_groups[idx], left=stack,
                     align='center', color=next(color))
        stack = stack + np.array(final[idx])
    plt.yticks(fontsize=fontsize)
    plt.ylim([0, len(self.functions)])
    plt.yticks(ind, group_names)
    plt.ylabel('Functions')
    plt.xlabel('Instructions count')
    plt.legend(loc="lower right")
    plt.title('Instructions count by function and group')
    # save
    if save:
        plt.savefig(out_filename)
    # show
    if show:
        plt.show()
|
Visualize the instructions repartitions per functions
|
visualize_instrs_per_funcs
|
python
|
FuzzingLabs/octopus
|
octopus/arch/wasm/cfg.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/arch/wasm/cfg.py
|
MIT
|
def decode_module(module, decode_name_subsections=False):
    """Decodes raw WASM modules, yielding `ModuleFragment`s.

    :param module: raw module bytes
    :param decode_name_subsections: if True, split the custom "name"
        section into its subsections and yield those instead
    """
    window = memoryview(module)

    # The module header comes first.
    consumed, header_data, _ = (header := ModuleHeader()).from_raw(None, window)
    yield ModuleFragment(header, header_data)
    window = window[consumed:]

    # Then every section until the buffer is exhausted.
    while window:
        section = Section()
        consumed, section_data, _ = section.from_raw(None, window)
        if (decode_name_subsections and
                section_data.id == SEC_UNK and
                section_data.name == SEC_NAME):
            # Decode the custom "name" section subsection by subsection.
            payload = section_data.payload
            while payload:
                subsection = NameSubSection()
                sub_len, sub_data, _ = subsection.from_raw(None, payload)
                yield ModuleFragment(subsection, sub_data)
                payload = payload[sub_len:]
        else:
            yield ModuleFragment(section, section_data)
        # fix bug KeyError: custom sections report a length that already
        # accounts for the name field, so back it out before advancing.
        if section_data.id == SEC_UNK and section_data.name:
            consumed -= section_data.name_len + 1
        window = window[consumed:]
|
Decodes raw WASM modules, yielding `ModuleFragment`s.
|
decode_module
|
python
|
FuzzingLabs/octopus
|
octopus/arch/wasm/decode.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/arch/wasm/decode.py
|
MIT
|
def disassemble_opcode(self, bytecode=None, offset=0):
    """Disassemble a single opcode (plus its immediate) at `offset`.

    based on decode_bytecode()
    https://github.com/athre0z/wasm/blob/master/wasm/decode.py
    """
    window = memoryview(bytecode)
    opcode_id = byte2int(window[0])

    # opcode:(mnemonic/name, imm_struct, pops, pushes, description)
    default = ('INVALID', 0, 0, 0, 'Unknown opcode')
    name, imm_struct, pops, pushes, description = \
        self.asm.table.get(opcode_id, default)

    operand_size = 0
    operand = None
    operand_interpretation = None
    if imm_struct is not None:
        # decode the immediate operand that follows the opcode byte
        operand_size, operand, _ = imm_struct.from_raw(None, window[1:])
        insn = inst_namedtuple(OPCODE_MAP[opcode_id], operand, 1 + operand_size)
        operand_interpretation = format_instruction(insn)

    raw_bytes = window[:1 + operand_size].tobytes()
    return WasmInstruction(opcode_id, name, imm_struct, operand_size,
                           raw_bytes, pops, pushes, description,
                           operand_interpretation=operand_interpretation,
                           offset=offset)
|
based on decode_bytecode()
https://github.com/athre0z/wasm/blob/master/wasm/decode.py
|
disassemble_opcode
|
python
|
FuzzingLabs/octopus
|
octopus/arch/wasm/disassembler.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/arch/wasm/disassembler.py
|
MIT
|
def __eq__(self, other):
    """Instructions are equal if all features match."""
    # compare every feature that identifies a wasm instruction
    features = ('opcode', 'name', 'offset', 'insn_byte', 'operand_size',
                'pops', 'pushes', 'operand_interpretation', 'description')
    return all(getattr(self, f) == getattr(other, f) for f in features)
|
Instructions are equal if all features match
|
__eq__
|
python
|
FuzzingLabs/octopus
|
octopus/arch/wasm/instruction.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/arch/wasm/instruction.py
|
MIT
|
def _get_reverse_table(self):
    """Build an internal mnemonic-keyed table used in the assembler."""
    # self.table maps opcode -> (mnemonic, imm_struct, pops, pushes, description)
    return {
        mnemonic: (opcode, mnemonic, imm_struct, pops, pushes, description)
        for opcode, (mnemonic, imm_struct, pops, pushes, description)
        in self.table.items()
    }
|
Build an internal table used in the assembler.
|
_get_reverse_table
|
python
|
FuzzingLabs/octopus
|
octopus/arch/wasm/wasm.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/arch/wasm/wasm.py
|
MIT
|
def __eq__(self, other):
    """Instructions are equal if all features match."""
    # compare every feature that identifies an instruction
    features = ('opcode', 'name', 'operand', 'operand_size',
                'pops', 'pushes', 'fee', 'offset', 'description')
    return all(getattr(self, f) == getattr(other, f) for f in features)
|
Instructions are equal if all features match
|
__eq__
|
python
|
FuzzingLabs/octopus
|
octopus/core/instruction.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/core/instruction.py
|
MIT
|
def disassemble(self, bytecode=None, offset=0, r_format='list'):
    """Generic method to disassemble bytecode.

    :param bytecode: bytecode sequence
    :param offset: start offset
    :param r_format: output format ('list'/'text'/'reverse')
    :type bytecode: bytes, str
    :type offset: int
    :type r_format: list, str, dict
    :return: disassembly result depending on r_format
    :rtype: list, str, dict
    """
    # start from a clean slate so repeated calls do not accumulate state
    self.attributes_reset()
    if bytecode:
        self.bytecode = bytecode
    if not self.bytecode:
        raise BytecodeEmptyException()
    self.bytecode = bytecode_to_bytes(self.bytecode)

    # decode one instruction at a time, advancing by its encoded size
    end = len(self.bytecode)
    while offset < end:
        instruction = self.disassemble_opcode(self.bytecode[offset:], offset)
        offset += instruction.size
        self.instructions.append(instruction)

    # fill reverse instructions (index -> instruction)
    self.reverse_instructions = dict(enumerate(self.instructions))

    # unknown formats fall through and return None, as before
    if r_format == 'list':
        return self.instructions
    elif r_format == 'text':
        return '\n'.join(str(i) for i in self.instructions)
    elif r_format == 'reverse':
        return self.reverse_instructions
|
Generic method to disassemble bytecode
:param bytecode: bytecode sequence
:param offset: start offset
:param r_format: output format ('list'/'text'/'reverse')
:type bytecode: bytes, str
:type offset: int
:type r_format: list, str, dict
:return: disassembly result depending on r_format
:rtype: list, str, dict
|
disassemble
|
python
|
FuzzingLabs/octopus
|
octopus/engine/disassembler.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/engine/disassembler.py
|
MIT
|
def bech32_polymod(values):
    """Internal function that computes the Bech32 checksum."""
    # BCH generator constants from the Bech32 specification (BIP-173)
    GEN = (0x3b6a57b2, 0x26508e6d, 0x1ea119fa, 0x3d4233dd, 0x2a1462b3)
    chk = 1
    for value in values:
        top = chk >> 25
        chk = ((chk & 0x1ffffff) << 5) ^ value
        # fold in one generator term per set bit of the shifted-out chunk
        for bit, g in enumerate(GEN):
            if (top >> bit) & 1:
                chk ^= g
    return chk
|
Internal function that computes the Bech32 checksum.
|
bech32_polymod
|
python
|
FuzzingLabs/octopus
|
octopus/platforms/BTC/bech32.py
|
https://github.com/FuzzingLabs/octopus/blob/master/octopus/platforms/BTC/bech32.py
|
MIT
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.