Dataset schema (column: type, observed value/length range):

  repo_name            string  (length 7 to 71)
  file_path            string  (length 5 to 118)
  context              list
  import_statement     string  (length 45 to 12.5k)
  token_num            int64   (641 to 99.4k)
  cropped_code         string  (length 44 to 17k)
  all_code             string  (length 43 to 754k)
  next_line            string  (length 2 to 330)
  gold_snippet_index   int64   (0 to 68)
  created_at           string  (length 25)
  level                string  (9 distinct values)
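Each record pairs repository context with a completion target: `cropped_code` plus `next_line`, with `gold_snippet_index` pointing at the relevant entry of `context`. A minimal sketch of how a single record could be assembled into a next-line prediction prompt follows; only the field names come from the schema above, the prompt layout itself is an assumption.

def build_prompt(record: dict) -> tuple[str, str]:
    # Concatenate the cross-file context snippets, the import block, and the
    # cropped in-file code; the model is asked to produce `next_line`.
    context_snippets = "\n".join(c["snippet"] for c in record["context"])
    prompt = (
        f"# repo: {record['repo_name']}  file: {record['file_path']}\n"
        f"{context_snippets}\n"
        f"{record['import_statement']}\n"
        f"{record['cropped_code']}\n"
    )
    return prompt, record["next_line"]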
ingra14m/Specular-Gaussians
scene/dataset_readers.py
[ { "identifier": "read_extrinsics_text", "path": "scene/colmap_loader.py", "snippet": "def read_extrinsics_text(path):\n \"\"\"\n Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py\n \"\"\"\n images = {}\n with open(path, \"r\") as fid:\n while True:\n line = fid.readline()\n if not line:\n break\n line = line.strip()\n if len(line) > 0 and line[0] != \"#\":\n elems = line.split()\n image_id = int(elems[0])\n qvec = np.array(tuple(map(float, elems[1:5])))\n tvec = np.array(tuple(map(float, elems[5:8])))\n camera_id = int(elems[8])\n image_name = elems[9]\n elems = fid.readline().split()\n xys = np.column_stack([tuple(map(float, elems[0::3])),\n tuple(map(float, elems[1::3]))])\n point3D_ids = np.array(tuple(map(int, elems[2::3])))\n images[image_id] = Image(\n id=image_id, qvec=qvec, tvec=tvec,\n camera_id=camera_id, name=image_name,\n xys=xys, point3D_ids=point3D_ids)\n return images" }, { "identifier": "read_intrinsics_text", "path": "scene/colmap_loader.py", "snippet": "def read_intrinsics_text(path):\n \"\"\"\n Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py\n \"\"\"\n cameras = {}\n with open(path, \"r\") as fid:\n while True:\n line = fid.readline()\n if not line:\n break\n line = line.strip()\n if len(line) > 0 and line[0] != \"#\":\n elems = line.split()\n camera_id = int(elems[0])\n model = elems[1]\n assert model == \"PINHOLE\", \"While the loader support other types, the rest of the code assumes PINHOLE\"\n width = int(elems[2])\n height = int(elems[3])\n params = np.array(tuple(map(float, elems[4:])))\n cameras[camera_id] = Camera(id=camera_id, model=model,\n width=width, height=height,\n params=params)\n return cameras" }, { "identifier": "qvec2rotmat", "path": "scene/colmap_loader.py", "snippet": "def qvec2rotmat(qvec):\n return np.array([\n [1 - 2 * qvec[2] ** 2 - 2 * qvec[3] ** 2,\n 2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3],\n 2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2]],\n [2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3],\n 1 - 2 * qvec[1] ** 2 - 2 * qvec[3] ** 2,\n 2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1]],\n [2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2],\n 2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1],\n 1 - 2 * qvec[1] ** 2 - 2 * qvec[2] ** 2]])" }, { "identifier": "read_extrinsics_binary", "path": "scene/colmap_loader.py", "snippet": "def read_extrinsics_binary(path_to_model_file):\n \"\"\"\n see: src/base/reconstruction.cc\n void Reconstruction::ReadImagesBinary(const std::string& path)\n void Reconstruction::WriteImagesBinary(const std::string& path)\n \"\"\"\n images = {}\n with open(path_to_model_file, \"rb\") as fid:\n num_reg_images = read_next_bytes(fid, 8, \"Q\")[0]\n for _ in range(num_reg_images):\n binary_image_properties = read_next_bytes(\n fid, num_bytes=64, format_char_sequence=\"idddddddi\")\n image_id = binary_image_properties[0]\n qvec = np.array(binary_image_properties[1:5])\n tvec = np.array(binary_image_properties[5:8])\n camera_id = binary_image_properties[8]\n image_name = \"\"\n current_char = read_next_bytes(fid, 1, \"c\")[0]\n while current_char != b\"\\x00\": # look for the ASCII 0 entry\n image_name += current_char.decode(\"utf-8\")\n current_char = read_next_bytes(fid, 1, \"c\")[0]\n num_points2D = read_next_bytes(fid, num_bytes=8,\n format_char_sequence=\"Q\")[0]\n x_y_id_s = read_next_bytes(fid, num_bytes=24 * num_points2D,\n format_char_sequence=\"ddq\" * num_points2D)\n xys = np.column_stack([tuple(map(float, x_y_id_s[0::3])),\n tuple(map(float, 
x_y_id_s[1::3]))])\n point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3])))\n images[image_id] = Image(\n id=image_id, qvec=qvec, tvec=tvec,\n camera_id=camera_id, name=image_name,\n xys=xys, point3D_ids=point3D_ids)\n return images" }, { "identifier": "read_intrinsics_binary", "path": "scene/colmap_loader.py", "snippet": "def read_intrinsics_binary(path_to_model_file):\n \"\"\"\n see: src/base/reconstruction.cc\n void Reconstruction::WriteCamerasBinary(const std::string& path)\n void Reconstruction::ReadCamerasBinary(const std::string& path)\n \"\"\"\n cameras = {}\n with open(path_to_model_file, \"rb\") as fid:\n num_cameras = read_next_bytes(fid, 8, \"Q\")[0]\n for _ in range(num_cameras):\n camera_properties = read_next_bytes(\n fid, num_bytes=24, format_char_sequence=\"iiQQ\")\n camera_id = camera_properties[0]\n model_id = camera_properties[1]\n model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name\n width = camera_properties[2]\n height = camera_properties[3]\n num_params = CAMERA_MODEL_IDS[model_id].num_params\n params = read_next_bytes(fid, num_bytes=8 * num_params,\n format_char_sequence=\"d\" * num_params)\n cameras[camera_id] = Camera(id=camera_id,\n model=model_name,\n width=width,\n height=height,\n params=np.array(params))\n assert len(cameras) == num_cameras\n return cameras" }, { "identifier": "read_points3D_binary", "path": "scene/colmap_loader.py", "snippet": "def read_points3D_binary(path_to_model_file):\n \"\"\"\n see: src/base/reconstruction.cc\n void Reconstruction::ReadPoints3DBinary(const std::string& path)\n void Reconstruction::WritePoints3DBinary(const std::string& path)\n \"\"\"\n\n with open(path_to_model_file, \"rb\") as fid:\n num_points = read_next_bytes(fid, 8, \"Q\")[0]\n\n xyzs = np.empty((num_points, 3))\n rgbs = np.empty((num_points, 3))\n errors = np.empty((num_points, 1))\n\n for p_id in range(num_points):\n binary_point_line_properties = read_next_bytes(\n fid, num_bytes=43, format_char_sequence=\"QdddBBBd\")\n xyz = np.array(binary_point_line_properties[1:4])\n rgb = np.array(binary_point_line_properties[4:7])\n error = np.array(binary_point_line_properties[7])\n track_length = read_next_bytes(\n fid, num_bytes=8, format_char_sequence=\"Q\")[0]\n track_elems = read_next_bytes(\n fid, num_bytes=8 * track_length,\n format_char_sequence=\"ii\" * track_length)\n xyzs[p_id] = xyz\n rgbs[p_id] = rgb\n errors[p_id] = error\n return xyzs, rgbs, errors" }, { "identifier": "read_points3D_text", "path": "scene/colmap_loader.py", "snippet": "def read_points3D_text(path):\n \"\"\"\n see: src/base/reconstruction.cc\n void Reconstruction::ReadPoints3DText(const std::string& path)\n void Reconstruction::WritePoints3DText(const std::string& path)\n \"\"\"\n xyzs = None\n rgbs = None\n errors = None\n with open(path, \"r\") as fid:\n while True:\n line = fid.readline()\n if not line:\n break\n line = line.strip()\n if len(line) > 0 and line[0] != \"#\":\n elems = line.split()\n xyz = np.array(tuple(map(float, elems[1:4])))\n rgb = np.array(tuple(map(int, elems[4:7])))\n error = np.array(float(elems[7]))\n if xyzs is None:\n xyzs = xyz[None, ...]\n rgbs = rgb[None, ...]\n errors = error[None, ...]\n else:\n xyzs = np.append(xyzs, xyz[None, ...], axis=0)\n rgbs = np.append(rgbs, rgb[None, ...], axis=0)\n errors = np.append(errors, error[None, ...], axis=0)\n return xyzs, rgbs, errors" }, { "identifier": "getWorld2View2", "path": "utils/graphics_utils.py", "snippet": "def getWorld2View2(R, t, translate=np.array([.0, .0, .0]), scale=1.0):\n Rt = np.zeros((4, 
4))\n Rt[:3, :3] = R.transpose()\n Rt[:3, 3] = t\n Rt[3, 3] = 1.0\n\n C2W = np.linalg.inv(Rt)\n cam_center = C2W[:3, 3]\n cam_center = (cam_center + translate) * scale\n C2W[:3, 3] = cam_center\n Rt = np.linalg.inv(C2W)\n return np.float32(Rt)" }, { "identifier": "focal2fov", "path": "utils/graphics_utils.py", "snippet": "def focal2fov(focal, pixels):\n return 2 * math.atan(pixels / (2 * focal))" }, { "identifier": "fov2focal", "path": "utils/graphics_utils.py", "snippet": "def fov2focal(fov, pixels):\n return pixels / (2 * math.tan(fov / 2))" }, { "identifier": "SH2RGB", "path": "utils/sh_utils.py", "snippet": "def SH2RGB(sh):\n return sh * C0 + 0.5" }, { "identifier": "BasicPointCloud", "path": "scene/gaussian_model.py", "snippet": "class GaussianModel:\n def __init__(self, sh_degree: int, asg_degree: int):\n def build_covariance_from_scaling_rotation(scaling, scaling_modifier, rotation):\n def get_asg_features(self):\n def get_roughness(self):\n def get_albedo(self):\n def get_metallic(self):\n def get_scaling(self):\n def get_rotation(self):\n def get_xyz(self):\n def get_features(self):\n def get_opacity(self):\n def get_covariance(self, scaling_modifier=1):\n def get_normal(self, dir_pp_normalized=None, return_delta=False):\n def get_minimum_axis(self):\n def oneupSHdegree(self):\n def create_from_pcd(self, pcd: BasicPointCloud, spatial_lr_scale: float):\n def training_setup(self, training_args):\n def update_learning_rate(self, iteration):\n def construct_list_of_attributes(self):\n def save_ply(self, path):\n def reset_opacity(self):\n def load_ply(self, path, og_number_points=-1):\n def replace_tensor_to_optimizer(self, tensor, name):\n def _prune_optimizer(self, mask):\n def prune_points(self, mask):\n def cat_tensors_to_optimizer(self, tensors_dict):\n def densification_postfix(self, new_xyz, new_features_dc, new_features_rest, new_opacities, new_scaling,\n new_rotation, new_feature_asg, new_normal, new_normal2, new_roughness, new_albedo,\n new_metallic):\n def densify_and_split(self, grads, grad_threshold, scene_extent, N=2):\n def densify_and_clone(self, grads, grad_threshold, scene_extent):\n def densify_and_prune(self, max_grad, min_opacity, extent, max_screen_size):\n def add_densification_stats(self, viewspace_point_tensor, update_filter):\n L = build_scaling_rotation(scaling_modifier * scaling, rotation)" }, { "identifier": "camera_nerfies_from_JSON", "path": "utils/camera_utils.py", "snippet": "def camera_nerfies_from_JSON(path, scale):\n \"\"\"Loads a JSON camera into memory.\"\"\"\n with open(path, 'r') as fp:\n camera_json = json.load(fp)\n\n # Fix old camera JSON.\n if 'tangential' in camera_json:\n camera_json['tangential_distortion'] = camera_json['tangential']\n\n return dict(\n orientation=np.array(camera_json['orientation']),\n position=np.array(camera_json['position']),\n focal_length=camera_json['focal_length'] * scale,\n principal_point=np.array(camera_json['principal_point']) * scale,\n skew=camera_json['skew'],\n pixel_aspect_ratio=camera_json['pixel_aspect_ratio'],\n radial_distortion=np.array(camera_json['radial_distortion']),\n tangential_distortion=np.array(camera_json['tangential_distortion']),\n image_size=np.array((int(round(camera_json['image_size'][0] * scale)),\n int(round(camera_json['image_size'][1] * scale)))),\n )" } ]
import os import sys import numpy as np import json import imageio import cv2 as cv from PIL import Image from typing import NamedTuple, Optional from scene.colmap_loader import read_extrinsics_text, read_intrinsics_text, qvec2rotmat, \ read_extrinsics_binary, read_intrinsics_binary, read_points3D_binary, read_points3D_text from utils.graphics_utils import getWorld2View2, focal2fov, fov2focal from glob import glob from pathlib import Path from plyfile import PlyData, PlyElement from utils.sh_utils import SH2RGB from scene.gaussian_model import BasicPointCloud from utils.camera_utils import camera_nerfies_from_JSON
5,461
else: assert False, "Colmap camera model not handled: only undistorted datasets (PINHOLE or SIMPLE_PINHOLE cameras) supported!" image_path = os.path.join(images_folder, os.path.basename(extr.name)) image_name = os.path.basename(image_path).split(".")[0] image = Image.open(image_path) cam_info = CameraInfo(uid=uid, R=R, T=T, FovY=FovY, FovX=FovX, image=image, image_path=image_path, image_name=image_name, width=width, height=height) cam_infos.append(cam_info) sys.stdout.write('\n') return cam_infos def fetchPly(path): plydata = PlyData.read(path) vertices = plydata['vertex'] positions = np.vstack([vertices['x'], vertices['y'], vertices['z']]).T colors = np.vstack([vertices['red'], vertices['green'], vertices['blue']]).T / 255.0 normals = np.vstack([vertices['nx'], vertices['ny'], vertices['nz']]).T return BasicPointCloud(points=positions, colors=colors, normals=normals) def storePly(path, xyz, rgb): # Define the dtype for the structured array dtype = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('nx', 'f4'), ('ny', 'f4'), ('nz', 'f4'), ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')] normals = np.zeros_like(xyz) elements = np.empty(xyz.shape[0], dtype=dtype) attributes = np.concatenate((xyz, normals, rgb), axis=1) elements[:] = list(map(tuple, attributes)) # Create the PlyData object and write to file vertex_element = PlyElement.describe(elements, 'vertex') ply_data = PlyData([vertex_element]) ply_data.write(path) def readColmapSceneInfo(path, images, eval, llffhold=8): try: cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.bin") cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.bin") cam_extrinsics = read_extrinsics_binary(cameras_extrinsic_file) cam_intrinsics = read_intrinsics_binary(cameras_intrinsic_file) except: cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.txt") cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.txt") cam_extrinsics = read_extrinsics_text(cameras_extrinsic_file) cam_intrinsics = read_intrinsics_text(cameras_intrinsic_file) reading_dir = "images" if images == None else images cam_infos_unsorted = readColmapCameras(cam_extrinsics=cam_extrinsics, cam_intrinsics=cam_intrinsics, images_folder=os.path.join(path, reading_dir)) cam_infos = sorted(cam_infos_unsorted.copy(), key=lambda x: x.image_name) if eval: train_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold != 0] test_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold == 0] else: train_cam_infos = cam_infos test_cam_infos = [] nerf_normalization = getNerfppNorm(train_cam_infos) ply_path = os.path.join(path, "sparse/0/points3D.ply") bin_path = os.path.join(path, "sparse/0/points3D.bin") txt_path = os.path.join(path, "sparse/0/points3D.txt") if not os.path.exists(ply_path): print("Converting point3d.bin to .ply, will happen only the first time you open the scene.") try: xyz, rgb, _ = read_points3D_binary(bin_path) except: xyz, rgb, _ = read_points3D_text(txt_path) storePly(ply_path, xyz, rgb) try: pcd = fetchPly(ply_path) except: pcd = None scene_info = SceneInfo(point_cloud=pcd, train_cameras=train_cam_infos, test_cameras=test_cam_infos, nerf_normalization=nerf_normalization, ply_path=ply_path) return scene_info def readCamerasFromTransforms(path, transformsfile, white_background, extension=".png"): cam_infos = [] with open(os.path.join(path, transformsfile)) as json_file: contents = json.load(json_file) fovx = contents["camera_angle_x"] frames = contents["frames"] for idx, frame in enumerate(frames): cam_name = os.path.join(path, 
frame["file_path"] + extension) matrix = np.linalg.inv(np.array(frame["transform_matrix"])) R = -np.transpose(matrix[:3, :3]) R[:, 0] = -R[:, 0] T = -matrix[:3, 3] image_path = os.path.join(path, cam_name) image_name = Path(cam_name).stem image = Image.open(image_path) # depth = imageio.imread(depth_name) im_data = np.array(image.convert("RGBA")) bg = np.array([1, 1, 1]) if white_background else np.array([0, 0, 0]) norm_data = im_data / 255.0 arr = norm_data[:, :, :3] * norm_data[:, :, 3:4] + bg * (1 - norm_data[:, :, 3:4]) image = Image.fromarray(np.array(arr * 255.0, dtype=np.byte), "RGB")
# # Copyright (C) 2023, Inria # GRAPHDECO research group, https://team.inria.fr/graphdeco # All rights reserved. # # This software is free for non-commercial, research and evaluation use # under the terms of the LICENSE.md file. # # For inquiries contact [email protected] # class CameraInfo(NamedTuple): uid: int R: np.array T: np.array FovY: np.array FovX: np.array image: np.array image_path: str image_name: str width: int height: int depth: Optional[np.array] = None class SceneInfo(NamedTuple): point_cloud: BasicPointCloud train_cameras: list test_cameras: list nerf_normalization: dict ply_path: str def load_K_Rt_from_P(filename, P=None): if P is None: lines = open(filename).read().splitlines() if len(lines) == 4: lines = lines[1:] lines = [[x[0], x[1], x[2], x[3]] for x in (x.split(" ") for x in lines)] P = np.asarray(lines).astype(np.float32).squeeze() out = cv.decomposeProjectionMatrix(P) K = out[0] R = out[1] t = out[2] K = K / K[2, 2] pose = np.eye(4, dtype=np.float32) pose[:3, :3] = R.transpose() pose[:3, 3] = (t[:3] / t[3])[:, 0] return K, pose def getNerfppNorm(cam_info): def get_center_and_diag(cam_centers): cam_centers = np.hstack(cam_centers) avg_cam_center = np.mean(cam_centers, axis=1, keepdims=True) center = avg_cam_center dist = np.linalg.norm(cam_centers - center, axis=0, keepdims=True) diagonal = np.max(dist) return center.flatten(), diagonal cam_centers = [] for cam in cam_info: W2C = getWorld2View2(cam.R, cam.T) C2W = np.linalg.inv(W2C) cam_centers.append(C2W[:3, 3:4]) center, diagonal = get_center_and_diag(cam_centers) radius = diagonal * 1.1 translate = -center return {"translate": translate, "radius": radius} def readColmapCameras(cam_extrinsics, cam_intrinsics, images_folder): cam_infos = [] num_frames = len(cam_extrinsics) for idx, key in enumerate(cam_extrinsics): sys.stdout.write('\r') # the exact output you're looking for: sys.stdout.write("Reading camera {}/{}".format(idx + 1, len(cam_extrinsics))) sys.stdout.flush() extr = cam_extrinsics[key] intr = cam_intrinsics[extr.camera_id] height = intr.height width = intr.width uid = intr.id R = np.transpose(qvec2rotmat(extr.qvec)) T = np.array(extr.tvec) if intr.model == "SIMPLE_PINHOLE": focal_length_x = intr.params[0] FovY = focal2fov(focal_length_x, height) FovX = focal2fov(focal_length_x, width) elif intr.model == "PINHOLE": focal_length_x = intr.params[0] focal_length_y = intr.params[1] FovY = focal2fov(focal_length_y, height) FovX = focal2fov(focal_length_x, width) else: assert False, "Colmap camera model not handled: only undistorted datasets (PINHOLE or SIMPLE_PINHOLE cameras) supported!" 
image_path = os.path.join(images_folder, os.path.basename(extr.name)) image_name = os.path.basename(image_path).split(".")[0] image = Image.open(image_path) cam_info = CameraInfo(uid=uid, R=R, T=T, FovY=FovY, FovX=FovX, image=image, image_path=image_path, image_name=image_name, width=width, height=height) cam_infos.append(cam_info) sys.stdout.write('\n') return cam_infos def fetchPly(path): plydata = PlyData.read(path) vertices = plydata['vertex'] positions = np.vstack([vertices['x'], vertices['y'], vertices['z']]).T colors = np.vstack([vertices['red'], vertices['green'], vertices['blue']]).T / 255.0 normals = np.vstack([vertices['nx'], vertices['ny'], vertices['nz']]).T return BasicPointCloud(points=positions, colors=colors, normals=normals) def storePly(path, xyz, rgb): # Define the dtype for the structured array dtype = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('nx', 'f4'), ('ny', 'f4'), ('nz', 'f4'), ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')] normals = np.zeros_like(xyz) elements = np.empty(xyz.shape[0], dtype=dtype) attributes = np.concatenate((xyz, normals, rgb), axis=1) elements[:] = list(map(tuple, attributes)) # Create the PlyData object and write to file vertex_element = PlyElement.describe(elements, 'vertex') ply_data = PlyData([vertex_element]) ply_data.write(path) def readColmapSceneInfo(path, images, eval, llffhold=8): try: cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.bin") cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.bin") cam_extrinsics = read_extrinsics_binary(cameras_extrinsic_file) cam_intrinsics = read_intrinsics_binary(cameras_intrinsic_file) except: cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.txt") cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.txt") cam_extrinsics = read_extrinsics_text(cameras_extrinsic_file) cam_intrinsics = read_intrinsics_text(cameras_intrinsic_file) reading_dir = "images" if images == None else images cam_infos_unsorted = readColmapCameras(cam_extrinsics=cam_extrinsics, cam_intrinsics=cam_intrinsics, images_folder=os.path.join(path, reading_dir)) cam_infos = sorted(cam_infos_unsorted.copy(), key=lambda x: x.image_name) if eval: train_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold != 0] test_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold == 0] else: train_cam_infos = cam_infos test_cam_infos = [] nerf_normalization = getNerfppNorm(train_cam_infos) ply_path = os.path.join(path, "sparse/0/points3D.ply") bin_path = os.path.join(path, "sparse/0/points3D.bin") txt_path = os.path.join(path, "sparse/0/points3D.txt") if not os.path.exists(ply_path): print("Converting point3d.bin to .ply, will happen only the first time you open the scene.") try: xyz, rgb, _ = read_points3D_binary(bin_path) except: xyz, rgb, _ = read_points3D_text(txt_path) storePly(ply_path, xyz, rgb) try: pcd = fetchPly(ply_path) except: pcd = None scene_info = SceneInfo(point_cloud=pcd, train_cameras=train_cam_infos, test_cameras=test_cam_infos, nerf_normalization=nerf_normalization, ply_path=ply_path) return scene_info def readCamerasFromTransforms(path, transformsfile, white_background, extension=".png"): cam_infos = [] with open(os.path.join(path, transformsfile)) as json_file: contents = json.load(json_file) fovx = contents["camera_angle_x"] frames = contents["frames"] for idx, frame in enumerate(frames): cam_name = os.path.join(path, frame["file_path"] + extension) matrix = np.linalg.inv(np.array(frame["transform_matrix"])) R = -np.transpose(matrix[:3, :3]) 
R[:, 0] = -R[:, 0] T = -matrix[:3, 3] image_path = os.path.join(path, cam_name) image_name = Path(cam_name).stem image = Image.open(image_path) # depth = imageio.imread(depth_name) im_data = np.array(image.convert("RGBA")) bg = np.array([1, 1, 1]) if white_background else np.array([0, 0, 0]) norm_data = im_data / 255.0 arr = norm_data[:, :, :3] * norm_data[:, :, 3:4] + bg * (1 - norm_data[:, :, 3:4]) image = Image.fromarray(np.array(arr * 255.0, dtype=np.byte), "RGB")
fovy = focal2fov(fov2focal(fovx, image.size[0]), image.size[1])
9
2023-12-12 14:59:01+00:00
8k
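For reference, the `focal2fov` / `fov2focal` helpers quoted in the record above are exact inverses of each other under the pinhole model; a self-contained round-trip check (the focal length and image width are arbitrary illustration values):

import math

def focal2fov(focal, pixels):
    # field of view (radians) spanned by `pixels` at focal length `focal`
    return 2 * math.atan(pixels / (2 * focal))

def fov2focal(fov, pixels):
    # inverse mapping: focal length that spans `pixels` at the given field of view
    return pixels / (2 * math.tan(fov / 2))

focal_x, width = 1111.0, 800      # illustration values only
fov_x = focal2fov(focal_x, width)
assert math.isclose(fov2focal(fov_x, width), focal_x)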
Artiprocher/DiffSynth-Studio
diffsynth/extensions/FastBlend/api.py
[ { "identifier": "AccurateModeRunner", "path": "diffsynth/extensions/FastBlend/runners/accurate.py", "snippet": "class AccurateModeRunner:\n def __init__(self):\n pass\n\n def run(self, frames_guide, frames_style, batch_size, window_size, ebsynth_config, desc=\"Accurate Mode\", save_path=None):\n patch_match_engine = PyramidPatchMatcher(\n image_height=frames_style[0].shape[0],\n image_width=frames_style[0].shape[1],\n channel=3,\n use_mean_target_style=True,\n **ebsynth_config\n )\n # run\n n = len(frames_style)\n for target in tqdm(range(n), desc=desc):\n l, r = max(target - window_size, 0), min(target + window_size + 1, n)\n remapped_frames = []\n for i in range(l, r, batch_size):\n j = min(i + batch_size, r)\n source_guide = np.stack([frames_guide[source] for source in range(i, j)])\n target_guide = np.stack([frames_guide[target]] * (j - i))\n source_style = np.stack([frames_style[source] for source in range(i, j)])\n _, target_style = patch_match_engine.estimate_nnf(source_guide, target_guide, source_style)\n remapped_frames.append(target_style)\n frame = np.concatenate(remapped_frames, axis=0).mean(axis=0)\n frame = frame.clip(0, 255).astype(\"uint8\")\n if save_path is not None:\n Image.fromarray(frame).save(os.path.join(save_path, \"%05d.png\" % target))" }, { "identifier": "FastModeRunner", "path": "diffsynth/extensions/FastBlend/runners/fast.py", "snippet": "class FastModeRunner:\n def __init__(self):\n pass\n\n def run(self, frames_guide, frames_style, batch_size, window_size, ebsynth_config, save_path=None):\n frames_guide = frames_guide.raw_data()\n frames_style = frames_style.raw_data()\n table_manager = TableManager()\n patch_match_engine = PyramidPatchMatcher(\n image_height=frames_style[0].shape[0],\n image_width=frames_style[0].shape[1],\n channel=3,\n **ebsynth_config\n )\n # left part\n table_l = table_manager.build_remapping_table(frames_guide, frames_style, patch_match_engine, batch_size, desc=\"Fast Mode Step 1/4\")\n table_l = table_manager.remapping_table_to_blending_table(table_l)\n table_l = table_manager.process_window_sum(frames_guide, table_l, patch_match_engine, window_size, batch_size, desc=\"Fast Mode Step 2/4\")\n # right part\n table_r = table_manager.build_remapping_table(frames_guide[::-1], frames_style[::-1], patch_match_engine, batch_size, desc=\"Fast Mode Step 3/4\")\n table_r = table_manager.remapping_table_to_blending_table(table_r)\n table_r = table_manager.process_window_sum(frames_guide[::-1], table_r, patch_match_engine, window_size, batch_size, desc=\"Fast Mode Step 4/4\")[::-1]\n # merge\n frames = []\n for (frame_l, weight_l), frame_m, (frame_r, weight_r) in zip(table_l, frames_style, table_r):\n weight_m = -1\n weight = weight_l + weight_m + weight_r\n frame = frame_l * (weight_l / weight) + frame_m * (weight_m / weight) + frame_r * (weight_r / weight)\n frames.append(frame)\n frames = [frame.clip(0, 255).astype(\"uint8\") for frame in frames]\n if save_path is not None:\n for target, frame in enumerate(frames):\n Image.fromarray(frame).save(os.path.join(save_path, \"%05d.png\" % target))" }, { "identifier": "BalancedModeRunner", "path": "diffsynth/extensions/FastBlend/runners/balanced.py", "snippet": "class BalancedModeRunner:\n def __init__(self):\n pass\n\n def run(self, frames_guide, frames_style, batch_size, window_size, ebsynth_config, desc=\"Balanced Mode\", save_path=None):\n patch_match_engine = PyramidPatchMatcher(\n image_height=frames_style[0].shape[0],\n image_width=frames_style[0].shape[1],\n channel=3,\n **ebsynth_config\n 
)\n # tasks\n n = len(frames_style)\n tasks = []\n for target in range(n):\n for source in range(target - window_size, target + window_size + 1):\n if source >= 0 and source < n and source != target:\n tasks.append((source, target))\n # run\n frames = [(None, 1) for i in range(n)]\n for batch_id in tqdm(range(0, len(tasks), batch_size), desc=desc):\n tasks_batch = tasks[batch_id: min(batch_id+batch_size, len(tasks))]\n source_guide = np.stack([frames_guide[source] for source, target in tasks_batch])\n target_guide = np.stack([frames_guide[target] for source, target in tasks_batch])\n source_style = np.stack([frames_style[source] for source, target in tasks_batch])\n _, target_style = patch_match_engine.estimate_nnf(source_guide, target_guide, source_style)\n for (source, target), result in zip(tasks_batch, target_style):\n frame, weight = frames[target]\n if frame is None:\n frame = frames_style[target]\n frames[target] = (\n frame * (weight / (weight + 1)) + result / (weight + 1),\n weight + 1\n )\n if weight + 1 == min(n, target + window_size + 1) - max(0, target - window_size):\n frame = frame.clip(0, 255).astype(\"uint8\")\n if save_path is not None:\n Image.fromarray(frame).save(os.path.join(save_path, \"%05d.png\" % target))\n frames[target] = (None, 1)" }, { "identifier": "InterpolationModeRunner", "path": "diffsynth/extensions/FastBlend/runners/interpolation.py", "snippet": "class InterpolationModeRunner:\n def __init__(self):\n pass\n\n def get_index_dict(self, index_style):\n index_dict = {}\n for i, index in enumerate(index_style):\n index_dict[index] = i\n return index_dict\n\n def get_weight(self, l, m, r):\n weight_l, weight_r = abs(m - r), abs(m - l)\n if weight_l + weight_r == 0:\n weight_l, weight_r = 0.5, 0.5\n else:\n weight_l, weight_r = weight_l / (weight_l + weight_r), weight_r / (weight_l + weight_r)\n return weight_l, weight_r\n\n def get_task_group(self, index_style, n):\n task_group = []\n index_style = sorted(index_style)\n # first frame\n if index_style[0]>0:\n tasks = []\n for m in range(index_style[0]):\n tasks.append((index_style[0], m, index_style[0]))\n task_group.append(tasks)\n # middle frames\n for l, r in zip(index_style[:-1], index_style[1:]):\n tasks = []\n for m in range(l, r):\n tasks.append((l, m, r))\n task_group.append(tasks)\n # last frame\n tasks = []\n for m in range(index_style[-1], n):\n tasks.append((index_style[-1], m, index_style[-1]))\n task_group.append(tasks)\n return task_group\n\n def run(self, frames_guide, frames_style, index_style, batch_size, ebsynth_config, save_path=None):\n patch_match_engine = PyramidPatchMatcher(\n image_height=frames_style[0].shape[0],\n image_width=frames_style[0].shape[1],\n channel=3,\n use_mean_target_style=False,\n use_pairwise_patch_error=True,\n **ebsynth_config\n )\n # task\n index_dict = self.get_index_dict(index_style)\n task_group = self.get_task_group(index_style, len(frames_guide))\n # run\n for tasks in task_group:\n index_start, index_end = min([i[1] for i in tasks]), max([i[1] for i in tasks])\n for batch_id in tqdm(range(0, len(tasks), batch_size), desc=f\"Rendering frames {index_start}...{index_end}\"):\n tasks_batch = tasks[batch_id: min(batch_id+batch_size, len(tasks))]\n source_guide, target_guide, source_style = [], [], []\n for l, m, r in tasks_batch:\n # l -> m\n source_guide.append(frames_guide[l])\n target_guide.append(frames_guide[m])\n source_style.append(frames_style[index_dict[l]])\n # r -> m\n source_guide.append(frames_guide[r])\n target_guide.append(frames_guide[m])\n 
source_style.append(frames_style[index_dict[r]])\n source_guide = np.stack(source_guide)\n target_guide = np.stack(target_guide)\n source_style = np.stack(source_style)\n _, target_style = patch_match_engine.estimate_nnf(source_guide, target_guide, source_style)\n if save_path is not None:\n for frame_l, frame_r, (l, m, r) in zip(target_style[0::2], target_style[1::2], tasks_batch):\n weight_l, weight_r = self.get_weight(l, m, r)\n frame = frame_l * weight_l + frame_r * weight_r\n frame = frame.clip(0, 255).astype(\"uint8\")\n Image.fromarray(frame).save(os.path.join(save_path, \"%05d.png\" % m))" }, { "identifier": "InterpolationModeSingleFrameRunner", "path": "diffsynth/extensions/FastBlend/runners/interpolation.py", "snippet": "class InterpolationModeSingleFrameRunner:\n def __init__(self):\n pass\n\n def run(self, frames_guide, frames_style, index_style, batch_size, ebsynth_config, save_path=None):\n # check input\n tracking_window_size = ebsynth_config[\"tracking_window_size\"]\n if tracking_window_size * 2 >= batch_size:\n raise ValueError(\"batch_size should be larger than track_window_size * 2\")\n frame_style = frames_style[0]\n frame_guide = frames_guide[index_style[0]]\n patch_match_engine = PyramidPatchMatcher(\n image_height=frame_style.shape[0],\n image_width=frame_style.shape[1],\n channel=3,\n **ebsynth_config\n )\n # run\n frame_id, n = 0, len(frames_guide)\n for i in tqdm(range(0, n, batch_size - tracking_window_size * 2), desc=f\"Rendering frames 0...{n}\"):\n if i + batch_size > n:\n l, r = max(n - batch_size, 0), n\n else:\n l, r = i, i + batch_size\n source_guide = np.stack([frame_guide] * (r-l))\n target_guide = np.stack([frames_guide[i] for i in range(l, r)])\n source_style = np.stack([frame_style] * (r-l))\n _, target_style = patch_match_engine.estimate_nnf(source_guide, target_guide, source_style)\n for i, frame in zip(range(l, r), target_style):\n if i==frame_id:\n frame = frame.clip(0, 255).astype(\"uint8\")\n Image.fromarray(frame).save(os.path.join(save_path, \"%05d.png\" % frame_id))\n frame_id += 1\n if r < n and r-frame_id <= tracking_window_size:\n break" }, { "identifier": "VideoData", "path": "diffsynth/extensions/FastBlend/data.py", "snippet": "class VideoData:\n def __init__(self, video_file, image_folder, **kwargs):\n if video_file is not None:\n self.data_type = \"video\"\n self.data = LowMemoryVideo(video_file, **kwargs)\n elif image_folder is not None:\n self.data_type = \"images\"\n self.data = LowMemoryImageFolder(image_folder, **kwargs)\n else:\n raise ValueError(\"Cannot open video or image folder\")\n self.length = None\n self.height = None\n self.width = None\n\n def raw_data(self):\n frames = []\n for i in range(self.__len__()):\n frames.append(self.__getitem__(i))\n return frames\n\n def set_length(self, length):\n self.length = length\n\n def set_shape(self, height, width):\n self.height = height\n self.width = width\n\n def __len__(self):\n if self.length is None:\n return len(self.data)\n else:\n return self.length\n\n def shape(self):\n if self.height is not None and self.width is not None:\n return self.height, self.width\n else:\n height, width, _ = self.__getitem__(0).shape\n return height, width\n\n def __getitem__(self, item):\n frame = self.data.__getitem__(item)\n height, width, _ = frame.shape\n if self.height is not None and self.width is not None:\n if self.height != height or self.width != width:\n frame = Image.fromarray(frame).resize((self.width, self.height))\n frame = np.array(frame)\n return frame\n\n def 
__del__(self):\n pass" }, { "identifier": "get_video_fps", "path": "diffsynth/extensions/FastBlend/data.py", "snippet": "def get_video_fps(file_name):\n reader = imageio.get_reader(file_name)\n fps = reader.get_meta_data()[\"fps\"]\n reader.close()\n return fps" }, { "identifier": "save_video", "path": "diffsynth/extensions/FastBlend/data.py", "snippet": "def save_video(frames_path, video_path, num_frames, fps):\n writer = imageio.get_writer(video_path, fps=fps, quality=9)\n for i in range(num_frames):\n frame = np.array(Image.open(os.path.join(frames_path, \"%05d.png\" % i)))\n writer.append_data(frame)\n writer.close()\n return video_path" }, { "identifier": "search_for_images", "path": "diffsynth/extensions/FastBlend/data.py", "snippet": "def search_for_images(folder):\n file_list = [i for i in os.listdir(folder) if i.endswith(\".jpg\") or i.endswith(\".png\")]\n file_list = [(split_file_name(file_name), file_name) for file_name in file_list]\n file_list = [i[1] for i in sorted(file_list)]\n file_list = [os.path.join(folder, i) for i in file_list]\n return file_list" } ]
from .runners import AccurateModeRunner, FastModeRunner, BalancedModeRunner, InterpolationModeRunner, InterpolationModeSingleFrameRunner from .data import VideoData, get_video_fps, save_video, search_for_images import os import gradio as gr
4,073
def check_input_for_blending(video_guide, video_guide_folder, video_style, video_style_folder): frames_guide = VideoData(video_guide, video_guide_folder) frames_style = VideoData(video_style, video_style_folder) message = "" if len(frames_guide) < len(frames_style): message += f"The number of frames mismatches. Only the first {len(frames_guide)} frames of style video will be used.\n" frames_style.set_length(len(frames_guide)) elif len(frames_guide) > len(frames_style): message += f"The number of frames mismatches. Only the first {len(frames_style)} frames of guide video will be used.\n" frames_guide.set_length(len(frames_style)) height_guide, width_guide = frames_guide.shape() height_style, width_style = frames_style.shape() if height_guide != height_style or width_guide != width_style: message += f"The shape of frames mismatches. The frames in style video will be resized to (height: {height_guide}, width: {width_guide})\n" frames_style.set_shape(height_guide, width_guide) return frames_guide, frames_style, message def smooth_video( video_guide, video_guide_folder, video_style, video_style_folder, mode, window_size, batch_size, tracking_window_size, output_path, fps, minimum_patch_size, num_iter, guide_weight, initialize, progress = None, ): # input frames_guide, frames_style, message = check_input_for_blending(video_guide, video_guide_folder, video_style, video_style_folder) if len(message) > 0: print(message) # output if output_path == "": if video_style is None: output_path = os.path.join(video_style_folder, "output") else: output_path = os.path.join(os.path.split(video_style)[0], "output") os.makedirs(output_path, exist_ok=True) print("No valid output_path. Your video will be saved here:", output_path) elif not os.path.exists(output_path): os.makedirs(output_path, exist_ok=True) print("Your video will be saved here:", output_path) frames_path = os.path.join(output_path, "frames") video_path = os.path.join(output_path, "video.mp4") os.makedirs(frames_path, exist_ok=True) # process if mode == "Fast" or mode == "Balanced": tracking_window_size = 0 ebsynth_config = { "minimum_patch_size": minimum_patch_size, "threads_per_block": 8, "num_iter": num_iter, "gpu_id": 0, "guide_weight": guide_weight, "initialize": initialize, "tracking_window_size": tracking_window_size, } if mode == "Fast":
def check_input_for_blending(video_guide, video_guide_folder, video_style, video_style_folder): frames_guide = VideoData(video_guide, video_guide_folder) frames_style = VideoData(video_style, video_style_folder) message = "" if len(frames_guide) < len(frames_style): message += f"The number of frames mismatches. Only the first {len(frames_guide)} frames of style video will be used.\n" frames_style.set_length(len(frames_guide)) elif len(frames_guide) > len(frames_style): message += f"The number of frames mismatches. Only the first {len(frames_style)} frames of guide video will be used.\n" frames_guide.set_length(len(frames_style)) height_guide, width_guide = frames_guide.shape() height_style, width_style = frames_style.shape() if height_guide != height_style or width_guide != width_style: message += f"The shape of frames mismatches. The frames in style video will be resized to (height: {height_guide}, width: {width_guide})\n" frames_style.set_shape(height_guide, width_guide) return frames_guide, frames_style, message def smooth_video( video_guide, video_guide_folder, video_style, video_style_folder, mode, window_size, batch_size, tracking_window_size, output_path, fps, minimum_patch_size, num_iter, guide_weight, initialize, progress = None, ): # input frames_guide, frames_style, message = check_input_for_blending(video_guide, video_guide_folder, video_style, video_style_folder) if len(message) > 0: print(message) # output if output_path == "": if video_style is None: output_path = os.path.join(video_style_folder, "output") else: output_path = os.path.join(os.path.split(video_style)[0], "output") os.makedirs(output_path, exist_ok=True) print("No valid output_path. Your video will be saved here:", output_path) elif not os.path.exists(output_path): os.makedirs(output_path, exist_ok=True) print("Your video will be saved here:", output_path) frames_path = os.path.join(output_path, "frames") video_path = os.path.join(output_path, "video.mp4") os.makedirs(frames_path, exist_ok=True) # process if mode == "Fast" or mode == "Balanced": tracking_window_size = 0 ebsynth_config = { "minimum_patch_size": minimum_patch_size, "threads_per_block": 8, "num_iter": num_iter, "gpu_id": 0, "guide_weight": guide_weight, "initialize": initialize, "tracking_window_size": tracking_window_size, } if mode == "Fast":
FastModeRunner().run(frames_guide, frames_style, batch_size=batch_size, window_size=window_size, ebsynth_config=ebsynth_config, save_path=frames_path)
1
2023-12-07 16:52:15+00:00
8k
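The interpolation runner in the record above blends each in-between frame from its two enclosing keyframes with distance-based weights; restated on its own (the example frame indices are arbitrary):

def get_weight(l, m, r):
    # Blend weights for frame m lying between keyframes l and r: each keyframe's
    # weight is proportional to its distance from the *other* keyframe, so the
    # nearer keyframe dominates; equal weights when l == m == r.
    weight_l, weight_r = abs(m - r), abs(m - l)
    if weight_l + weight_r == 0:
        return 0.5, 0.5
    total = weight_l + weight_r
    return weight_l / total, weight_r / total

print(get_weight(0, 3, 10))   # (0.7, 0.3): frame 3 leans toward the nearer keyframe 0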
vikhyat/mixtral-inference
mixtral/model.py
[ { "identifier": "precompute_freqs_cis", "path": "mixtral/rope.py", "snippet": "def precompute_freqs_cis(dim: int, end: int, theta: float = 10000.0) -> torch.Tensor:\n freqs = 1.0 / (theta ** (torch.arange(0, dim, 2)[: (dim // 2)].float() / dim))\n t = torch.arange(end, device=freqs.device) # type: ignore\n freqs = torch.outer(t, freqs).float() # type: ignore\n return torch.polar(torch.ones_like(freqs), freqs) # complex64" }, { "identifier": "apply_rotary_emb", "path": "mixtral/rope.py", "snippet": "def apply_rotary_emb(\n xq: torch.Tensor,\n xk: torch.Tensor,\n freqs_cis: torch.Tensor,\n) -> Tuple[torch.Tensor, torch.Tensor]:\n xq_ = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2))\n xk_ = torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2))\n freqs_cis = freqs_cis[:, None, :]\n xq_out = torch.view_as_real(xq_ * freqs_cis).flatten(2)\n xk_out = torch.view_as_real(xk_ * freqs_cis).flatten(2)\n return xq_out.type_as(xq), xk_out.type_as(xk)" }, { "identifier": "CacheView", "path": "mixtral/cache.py", "snippet": "class CacheView:\n def __init__(self, cache_k: torch.Tensor, cache_v: torch.Tensor, metadata: RotatingCacheInputMetadata, kv_seqlens: torch.Tensor):\n self.cache_k = cache_k\n self.cache_v = cache_v\n self.kv_seqlens = kv_seqlens\n self.metadata = metadata\n\n def update(self, xk: torch.Tensor, xv: torch.Tensor):\n \"\"\"\n to_cache_mask masks the last [sliding_window] tokens in each sequence\n \"\"\"\n n_kv_heads, head_dim = self.cache_k.shape[-2:]\n flat_cache_k = self.cache_k.view(-1, n_kv_heads, head_dim)\n flat_cache_v = self.cache_v.view(-1, n_kv_heads, head_dim)\n \n flat_cache_k.index_copy_(0, self.metadata.cache_positions, xk[self.metadata.to_cache_mask])\n flat_cache_v.index_copy_(0, self.metadata.cache_positions, xv[self.metadata.to_cache_mask])\n\n def interleave_kv(self, xk: torch.Tensor, xv: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n This is a naive implementation and not optimized for speed.\n \"\"\"\n assert xk.ndim == xv.ndim == 3 # (B * T, H, D)\n assert xk.shape == xv.shape\n\n if all([s == 0 for s in self.metadata.seqlens]):\n # No cache to interleave\n return xk, xv\n\n # Make it a list of [(T, H, D)]\n xk = torch.split(xk, self.metadata.seqlens)\n xv = torch.split(xv, self.metadata.seqlens)\n assert len(xk) == len(self.kv_seqlens), f\"Batch size is {len(self.kv_seqlens)}, got {len(xk)}\"\n\n # Order elements in cache by position by unrotating\n cache_k = [unrotate(t, s) for t, s in zip(self.cache_k.to(xk[0].device), self.kv_seqlens)]\n cache_v = [unrotate(t, s) for t, s in zip(self.cache_v.to(xv[0].device), self.kv_seqlens)]\n\n interleaved_k = interleave_list(cache_k, xk)\n interleaved_v = interleave_list(cache_v, xv)\n\n return torch.cat(interleaved_k, dim=0), torch.cat(interleaved_v, dim=0)\n\n @property\n def sliding_window(self):\n return self.cache_k.shape[1]\n\n @property\n def key(self) -> torch.Tensor:\n return self.cache_k[:len(self.kv_seqlens)]\n\n @property\n def value(self) -> torch.Tensor:\n return self.cache_v[:len(self.kv_seqlens)]\n\n @property\n def prefill(self):\n return self.metadata.prefill\n\n @property\n def mask(self):\n return self.metadata.mask" }, { "identifier": "RotatingBufferCache", "path": "mixtral/cache.py", "snippet": "class RotatingBufferCache:\n \"\"\"\n This is an example that implements a less naive rotating buffer cache, allowing for variable length sequences.\n Allocated cache is rectangular which is wasteful (see PagedAttention for better mechanisms)\n \"\"\"\n def 
__init__(self, n_layers: int, max_batch_size: int, sliding_window: int, n_kv_heads: int, head_dim: int):\n\n self.sliding_window = sliding_window\n self.n_kv_heads = n_kv_heads\n self.head_dim = head_dim\n\n self.cache_k = torch.empty((\n n_layers,\n max_batch_size,\n sliding_window,\n n_kv_heads,\n head_dim\n ))\n self.cache_v = torch.empty((\n n_layers,\n max_batch_size,\n sliding_window,\n n_kv_heads,\n head_dim\n ))\n # holds the valid length for each batch element in the cache\n self.kv_seqlens = None\n\n def get_view(self, layer_id: int, metadata: RotatingCacheInputMetadata) -> CacheView:\n return CacheView(self.cache_k[layer_id], self.cache_v[layer_id], metadata, self.kv_seqlens)\n\n def reset(self):\n self.kv_seqlens = None\n\n def init_kvseqlens(self, batch_size: int):\n self.kv_seqlens = torch.zeros((batch_size,), device=self.device, dtype=torch.long)\n\n @property\n def device(self):\n return self.cache_k.device\n\n def to(self, device: torch.device, dtype: torch.dtype):\n self.cache_k = self.cache_k.to(device=device, dtype=dtype)\n self.cache_v = self.cache_v.to(device=device, dtype=dtype)\n\n return self\n\n def update_seqlens(self, seqlens: List[int]):\n self.kv_seqlens += torch.tensor(seqlens, device=self.device, dtype=torch.long)\n\n def get_input_metadata(self, seqlens: List[int]) -> RotatingCacheInputMetadata:\n \"\"\"\n inpput = seqlens [5,7,2] // seqpos [0, 1, 3] // sliding_window 3\n --> only cache last 3 tokens in each sequence\n - to_cache_mask = [0 0 1 1 1 | 0 0 0 0 1 1 1 | 1 1]\n - cached_elements = [3 | 3 | 2]\n --> absolute positions are used for rope\n - positions = [0 1 2 3 4 | 1 2 3 4 5 6 7 | 3 4]\n --> cache positions are positions cache_masked, modulo sliding_window + batch_idx * sliding_window\n - cache_positions = [2 0 1 | 5 3 4 | 6 7]\n \"\"\"\n if self.kv_seqlens is None:\n self.init_kvseqlens(len(seqlens))\n assert len(seqlens) == len(self.kv_seqlens), f\"Batch size is {len(self.kv_seqlens)}, got {len(seqlens)}, did you forget to reset cache?\"\n seqpos = self.kv_seqlens.tolist()\n\n assert len(seqlens) > 0, seqlens\n masks = [\n [x >= seqlen - self.sliding_window for x in range(seqlen)]\n for seqlen in seqlens\n ]\n to_cache_mask = torch.tensor(sum(masks, []), device=self.device, dtype=torch.bool)\n cached_elements = torch.tensor([sum(mask) for mask in masks], device=self.device, dtype=torch.long)\n positions = torch.cat([torch.arange(pos, pos + seqlen) for pos, seqlen in zip(seqpos, seqlens)]).to(device=self.device, dtype=torch.long)\n batch_idx = torch.tensor(sum([[i]*seqlen for i, seqlen in enumerate(seqlens)], []), device=self.device, dtype=torch.long)\n cache_positions = positions % self.sliding_window + batch_idx * self.sliding_window\n\n first_prefill = seqpos[0] == 0\n subsequent_prefill = any(seqlen > 1 for seqlen in seqlens)\n if first_prefill:\n assert all([pos == 0 for pos in seqpos]), (seqpos)\n mask = BlockDiagonalCausalMask.from_seqlens(seqlens).make_local_attention(self.sliding_window)\n elif subsequent_prefill:\n mask = BlockDiagonalMask.from_seqlens(\n q_seqlen=seqlens,\n kv_seqlen=[s + cached_s.clamp(max=self.sliding_window).item() for (s, cached_s) in zip(seqlens, self.kv_seqlens)]\n ).make_local_attention_from_bottomright(self.sliding_window)\n else:\n mask = BlockDiagonalCausalWithOffsetPaddedKeysMask.from_seqlens(\n q_seqlen=seqlens,\n kv_padding=self.sliding_window,\n kv_seqlen=(self.kv_seqlens + cached_elements).clamp(max=self.sliding_window).tolist()\n )\n\n return RotatingCacheInputMetadata(\n positions=positions,\n 
to_cache_mask=to_cache_mask,\n cached_elements=cached_elements,\n cache_positions=cache_positions[to_cache_mask],\n prefill=first_prefill or subsequent_prefill,\n mask=mask,\n seqlens=seqlens,\n )" } ]
import torch import json from torch import nn from dataclasses import dataclass from pathlib import Path from typing import List, Optional from mixtral.rope import precompute_freqs_cis, apply_rotary_emb from mixtral.cache import CacheView, RotatingBufferCache from xformers.ops.fmha import ( memory_efficient_attention, )
4,389
self.experts = torch.nn.ModuleList( [FeedForwardExpert(args, device=device, dtype=dtype) for _ in range(args.moe['num_experts'])] ) def forward(self, x) -> torch.Tensor: g = self.gate(x) g = torch.softmax(g, dim=-1) weights, expert_indices = torch.topk(g, 2, dim=-1) weights /= weights.sum(dim=-1, keepdim=True) result = torch.zeros_like(x) for batch in range(x.shape[0]): w_b, ei_b = weights[batch], expert_indices[batch] for i, w in zip(ei_b, w_b): result[batch] += w * self.experts[i](x[batch]) return result class FeedForwardExpert(nn.Module): def __init__(self, args: ModelArgs, device='cuda', dtype=torch.float16): super().__init__() self.w1 = nn.Linear( args.dim, args.hidden_dim, bias=False, device='meta', dtype=dtype ) self.w1.to_empty(device=device) self.w2 = nn.Linear( args.hidden_dim, args.dim, bias=False, device='meta', dtype=dtype ) self.w2.to_empty(device=device) self.w3 = nn.Linear( args.dim, args.hidden_dim, bias=False, device='meta', dtype=dtype ) self.w3.to_empty(device=device) def forward(self, x) -> torch.Tensor: return self.w2(nn.functional.silu(self.w1(x)) * self.w3(x)) class RMSNorm(torch.nn.Module): def __init__(self, dim: int, eps: float = 1e-6): super().__init__() self.eps = eps self.weight = nn.Parameter(torch.ones(dim)) def _norm(self, x): return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps) def forward(self, x): output = self._norm(x.float()).type_as(x) return output * self.weight class TransformerBlock(nn.Module): def __init__(self, args: ModelArgs, device='cuda', dtype=torch.float16): super().__init__() self.n_heads = args.n_heads self.dim = args.dim self.attention = Attention(args, device=device, dtype=dtype) self.feed_forward = FeedForward(args=args, device=device, dtype=dtype) self.attention_norm = RMSNorm(args.dim, eps=args.norm_eps).to(device, dtype=dtype) self.ffn_norm = RMSNorm(args.dim, eps=args.norm_eps).to(device, dtype=dtype) self.args = args def forward( self, x: torch.Tensor, freqs_cis: torch.Tensor, cache: Optional[CacheView] ) -> torch.Tensor: x = x.to(self.attention_norm.weight.device) freqs_cis = freqs_cis.to(self.attention_norm.weight.device) r = self.attention.forward(self.attention_norm(x), freqs_cis, cache) h = x + r r = self.feed_forward.forward(self.ffn_norm(h)) out = h + r return out class Transformer(nn.Module): def __init__(self, args: ModelArgs, devices: List[str], dtype=torch.float16): super().__init__() self.args = args self.vocab_size = args.vocab_size self.n_layers = args.n_layers assert self.vocab_size > 0 self.tok_embeddings = nn.Embedding(args.vocab_size, args.dim, device='meta', dtype=dtype) self.tok_embeddings.to_empty(device=devices[0]) self.layers = torch.nn.ModuleList( [ TransformerBlock(args=args, device=devices[(i * len(devices)) // args.n_layers], dtype=dtype) for i in range(args.n_layers) ] ) self.norm = RMSNorm(args.dim, eps=args.norm_eps).to(devices[0], dtype=dtype) self.output = nn.Linear( args.dim, args.vocab_size, bias=False, device='meta', dtype=dtype ) self.output.to_empty(device=devices[0])
@dataclass class MoeArgs: num_experts_per_tok: int num_experts: int @dataclass class ModelArgs: dim: int n_layers: int head_dim: int hidden_dim: int n_heads: int n_kv_heads: int norm_eps: float vocab_size: int moe: MoeArgs max_batch_size: int = 0 @dataclass class SimpleInputMetadata: # rope absolute positions positions: torch.Tensor @staticmethod def from_seqlens(seqlens: List[int], device: torch.device) -> "SimpleInputMetadata": return SimpleInputMetadata( positions = torch.cat( [torch.arange(0, seqlen) for seqlen in seqlens] ).to(device=device, dtype=torch.long) ) def repeat_kv(keys: torch.Tensor, values: torch.Tensor, repeats: int, dim: int): keys = torch.repeat_interleave(keys, repeats=repeats, dim=dim) values = torch.repeat_interleave(values, repeats=repeats, dim=dim) return keys, values class Attention(nn.Module): def __init__(self, args: ModelArgs, device='cuda', dtype=torch.float16): super().__init__() self.args = args self.n_heads: int = args.n_heads self.n_kv_heads: int = args.n_kv_heads self.repeats = self.n_heads // self.n_kv_heads self.scale = self.args.head_dim**-0.5 self.wq = nn.Linear( args.dim, args.n_heads * args.head_dim, bias=False, device='meta', dtype=dtype ) self.wq.to_empty(device=device) self.wk = nn.Linear( args.dim, args.n_kv_heads * args.head_dim, bias=False, device='meta', dtype=dtype ) self.wk.to_empty(device=device) self.wv = nn.Linear( args.dim, args.n_kv_heads * args.head_dim, bias=False, device='meta', dtype=dtype ) self.wv.to_empty(device=device) self.wo = nn.Linear( args.n_heads * args.head_dim, args.dim, bias=False, device='meta', dtype=dtype ) self.wo.to_empty(device=device) def forward( self, x: torch.Tensor, freqs_cis: torch.Tensor, cache: Optional[CacheView], ) -> torch.Tensor: seqlen_sum, _ = x.shape xq, xk, xv = self.wq(x), self.wk(x), self.wv(x) xq = xq.view(seqlen_sum, self.n_heads, self.args.head_dim) xk = xk.view(seqlen_sum, self.n_kv_heads, self.args.head_dim) xv = xv.view(seqlen_sum, self.n_kv_heads, self.args.head_dim) xq, xk = apply_rotary_emb(xq, xk, freqs_cis=freqs_cis) xk = xk.to('cuda:0') xv = xv.to('cuda:0') if cache is None: key, val = xk, xv elif cache.prefill: key, val = cache.interleave_kv(xk, xv) cache.update(xk, xv) else: cache.update(xk, xv) key, val = cache.key, cache.value key = key.view(seqlen_sum * cache.sliding_window, self.n_kv_heads, self.args.head_dim) val = val.view(seqlen_sum * cache.sliding_window, self.n_kv_heads, self.args.head_dim) key, val = key.to(x.device), val.to(x.device) # Repeat keys and values to match number of query heads key, val = repeat_kv(key, val, self.repeats, dim=1) # xformers requires (B=1, S, H, D) xq, key, val = xq[None, ...], key[None, ...], val[None, ...] 
output = memory_efficient_attention(xq, key, val, None if cache is None else cache.mask) return self.wo(output.view_as(x)) class FeedForward(nn.Module): def __init__(self, args: ModelArgs, device='cuda', dtype=torch.float16): super().__init__() self.gate = nn.Linear(args.dim, args.moe['num_experts'], bias=False, device='meta', dtype=dtype) self.gate.to_empty(device=device) self.experts = torch.nn.ModuleList( [FeedForwardExpert(args, device=device, dtype=dtype) for _ in range(args.moe['num_experts'])] ) def forward(self, x) -> torch.Tensor: g = self.gate(x) g = torch.softmax(g, dim=-1) weights, expert_indices = torch.topk(g, 2, dim=-1) weights /= weights.sum(dim=-1, keepdim=True) result = torch.zeros_like(x) for batch in range(x.shape[0]): w_b, ei_b = weights[batch], expert_indices[batch] for i, w in zip(ei_b, w_b): result[batch] += w * self.experts[i](x[batch]) return result class FeedForwardExpert(nn.Module): def __init__(self, args: ModelArgs, device='cuda', dtype=torch.float16): super().__init__() self.w1 = nn.Linear( args.dim, args.hidden_dim, bias=False, device='meta', dtype=dtype ) self.w1.to_empty(device=device) self.w2 = nn.Linear( args.hidden_dim, args.dim, bias=False, device='meta', dtype=dtype ) self.w2.to_empty(device=device) self.w3 = nn.Linear( args.dim, args.hidden_dim, bias=False, device='meta', dtype=dtype ) self.w3.to_empty(device=device) def forward(self, x) -> torch.Tensor: return self.w2(nn.functional.silu(self.w1(x)) * self.w3(x)) class RMSNorm(torch.nn.Module): def __init__(self, dim: int, eps: float = 1e-6): super().__init__() self.eps = eps self.weight = nn.Parameter(torch.ones(dim)) def _norm(self, x): return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps) def forward(self, x): output = self._norm(x.float()).type_as(x) return output * self.weight class TransformerBlock(nn.Module): def __init__(self, args: ModelArgs, device='cuda', dtype=torch.float16): super().__init__() self.n_heads = args.n_heads self.dim = args.dim self.attention = Attention(args, device=device, dtype=dtype) self.feed_forward = FeedForward(args=args, device=device, dtype=dtype) self.attention_norm = RMSNorm(args.dim, eps=args.norm_eps).to(device, dtype=dtype) self.ffn_norm = RMSNorm(args.dim, eps=args.norm_eps).to(device, dtype=dtype) self.args = args def forward( self, x: torch.Tensor, freqs_cis: torch.Tensor, cache: Optional[CacheView] ) -> torch.Tensor: x = x.to(self.attention_norm.weight.device) freqs_cis = freqs_cis.to(self.attention_norm.weight.device) r = self.attention.forward(self.attention_norm(x), freqs_cis, cache) h = x + r r = self.feed_forward.forward(self.ffn_norm(h)) out = h + r return out class Transformer(nn.Module): def __init__(self, args: ModelArgs, devices: List[str], dtype=torch.float16): super().__init__() self.args = args self.vocab_size = args.vocab_size self.n_layers = args.n_layers assert self.vocab_size > 0 self.tok_embeddings = nn.Embedding(args.vocab_size, args.dim, device='meta', dtype=dtype) self.tok_embeddings.to_empty(device=devices[0]) self.layers = torch.nn.ModuleList( [ TransformerBlock(args=args, device=devices[(i * len(devices)) // args.n_layers], dtype=dtype) for i in range(args.n_layers) ] ) self.norm = RMSNorm(args.dim, eps=args.norm_eps).to(devices[0], dtype=dtype) self.output = nn.Linear( args.dim, args.vocab_size, bias=False, device='meta', dtype=dtype ) self.output.to_empty(device=devices[0])
self.freqs_cis = precompute_freqs_cis(self.args.head_dim, 128_000, 1e6).to(devices[0])
0
2023-12-08 22:48:32+00:00
8k
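The `FeedForward.forward` in the record above routes each token through its top-2 experts; a hedged restatement of that gating math (the dense `expert_outputs` tensor below is a simplification for illustration — the repo evaluates experts per token in a loop instead):

import torch

def route_top2(gate_logits: torch.Tensor, expert_outputs: torch.Tensor) -> torch.Tensor:
    # gate_logits: (tokens, num_experts); expert_outputs: (num_experts, tokens, dim)
    g = torch.softmax(gate_logits, dim=-1)
    weights, expert_indices = torch.topk(g, 2, dim=-1)        # keep the two largest gates
    weights = weights / weights.sum(dim=-1, keepdim=True)     # renormalize over the kept pair
    token_idx = torch.arange(gate_logits.shape[0])[:, None]   # (tokens, 1), broadcasts to (tokens, 2)
    picked = expert_outputs[expert_indices, token_idx]        # (tokens, 2, dim)
    return (weights.unsqueeze(-1) * picked).sum(dim=1)

tokens, num_experts, dim = 4, 8, 16
out = route_top2(torch.randn(tokens, num_experts), torch.randn(num_experts, tokens, dim))
assert out.shape == (tokens, dim)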
u2seg/U2Seg
detectron2/evaluation/sem_seg_evaluation.py
[ { "identifier": "DatasetCatalog", "path": "detectron2/data/catalog.py", "snippet": "class _DatasetCatalog(UserDict):\nclass Metadata(types.SimpleNamespace):\nclass _MetadataCatalog(UserDict):\n def register(self, name, func):\n def get(self, name):\n def list(self) -> List[str]:\n def remove(self, name):\n def __str__(self):\n def __getattr__(self, key):\n def __setattr__(self, key, val):\n def as_dict(self):\n def set(self, **kwargs):\n def get(self, key, default=None):\n def get(self, name):\n def list(self):\n def remove(self, name):\n def __str__(self):\n _RENAMED = {\n \"class_names\": \"thing_classes\",\n \"dataset_id_to_contiguous_id\": \"thing_dataset_id_to_contiguous_id\",\n \"stuff_class_names\": \"stuff_classes\",\n }" }, { "identifier": "all_gather", "path": "detectron2/utils/comm.py", "snippet": "def all_gather(data, group=None):\n \"\"\"\n Run all_gather on arbitrary picklable data (not necessarily tensors).\n\n Args:\n data: any picklable object\n group: a torch process group. By default, will use a group which\n contains all ranks on gloo backend.\n\n Returns:\n list[data]: list of data gathered from each rank\n \"\"\"\n if get_world_size() == 1:\n return [data]\n if group is None:\n group = _get_global_gloo_group() # use CPU group by default, to reduce GPU RAM usage.\n world_size = dist.get_world_size(group)\n if world_size == 1:\n return [data]\n\n output = [None for _ in range(world_size)]\n dist.all_gather_object(output, data, group=group)\n return output" }, { "identifier": "is_main_process", "path": "detectron2/utils/comm.py", "snippet": "def is_main_process() -> bool:\n return get_rank() == 0" }, { "identifier": "synchronize", "path": "detectron2/utils/comm.py", "snippet": "def synchronize():\n \"\"\"\n Helper function to synchronize (barrier) among all processes when\n using distributed training\n \"\"\"\n if not dist.is_available():\n return\n if not dist.is_initialized():\n return\n world_size = dist.get_world_size()\n if world_size == 1:\n return\n if dist.get_backend() == dist.Backend.NCCL:\n # This argument is needed to avoid warnings.\n # It's valid only for NCCL backend.\n dist.barrier(device_ids=[torch.cuda.current_device()])\n else:\n dist.barrier()" }, { "identifier": "PathManager", "path": "detectron2/utils/file_io.py", "snippet": "class Detectron2Handler(PathHandler):\n PREFIX = \"detectron2://\"\n S3_DETECTRON2_PREFIX = \"https://dl.fbaipublicfiles.com/detectron2/\"\n def _get_supported_prefixes(self):\n def _get_local_path(self, path, **kwargs):\n def _open(self, path, mode=\"r\", **kwargs):" }, { "identifier": "DatasetEvaluator", "path": "detectron2/evaluation/evaluator.py", "snippet": "class DatasetEvaluator:\n \"\"\"\n Base class for a dataset evaluator.\n\n The function :func:`inference_on_dataset` runs the model over\n all samples in the dataset, and have a DatasetEvaluator to process the inputs/outputs.\n\n This class will accumulate information of the inputs/outputs (by :meth:`process`),\n and produce evaluation results in the end (by :meth:`evaluate`).\n \"\"\"\n\n def reset(self):\n \"\"\"\n Preparation for a new round of evaluation.\n Should be called before starting a round of evaluation.\n \"\"\"\n pass\n\n def process(self, inputs, outputs):\n \"\"\"\n Process the pair of inputs and outputs.\n If they contain batches, the pairs can be consumed one-by-one using `zip`:\n\n .. 
code-block:: python\n\n for input_, output in zip(inputs, outputs):\n # do evaluation on single input/output pair\n ...\n\n Args:\n inputs (list): the inputs that's used to call the model.\n outputs (list): the return value of `model(inputs)`\n \"\"\"\n pass\n\n def evaluate(self):\n \"\"\"\n Evaluate/summarize the performance, after processing all input/output pairs.\n\n Returns:\n dict:\n A new evaluator class can return a dict of arbitrary format\n as long as the user can process the results.\n In our train_net.py, we expect the following format:\n\n * key: the name of the task (e.g., bbox)\n * value: a dict of {metric name: score}, e.g.: {\"AP50\": 80}\n \"\"\"\n pass" } ]
import itertools
import json
import logging
import numpy as np
import os
import pycocotools.mask as mask_util
import torch
import cv2 # noqa
from collections import OrderedDict
from typing import Optional, Union
from PIL import Image
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.utils.comm import all_gather, is_main_process, synchronize
from detectron2.utils.file_io import PathManager
from .evaluator import DatasetEvaluator
3,646
outputs: the outputs of a model. It is either list of semantic segmentation predictions (Tensor [H, W]) or list of dicts with key "sem_seg" that contains semantic segmentation prediction in the same format. """ for input, output in zip(inputs, outputs): output = output["sem_seg"].argmax(dim=0).to(self._cpu_device) pred = np.array(output, dtype=int) gt_filename = self.input_file_to_gt_file[input["file_name"]] gt = self.sem_seg_loading_fn(gt_filename, dtype=int) # here for hungarian_matching self.do_hangarain_mapping(coco_results=pred, gt=gt) # this is transfer the pred to # # # transfer to supercategory # gt[gt == self._ignore_label] = self._num_classes # mapping_dict = json.load( # open('/home/niudt/detectron2/tools/hungarain_matching/cocotrain_300/semantic_mapping.json')) # for cls in mapping_dict: # # cls = int(_cls) # if mapping_dict[cls] == -1: # pred[pred == int(cls)] = 0 # self._num_classes # else: # pred[pred == int(cls)] = mapping_dict[cls] self._conf_matrix += np.bincount( (self._num_classes + 1) * pred.reshape(-1) + gt.reshape(-1), minlength=self._conf_matrix.size, ).reshape(self._conf_matrix.shape) if self._compute_boundary_iou: b_gt = self._mask_to_boundary(gt.astype(np.uint8)) b_pred = self._mask_to_boundary(pred.astype(np.uint8)) self._b_conf_matrix += np.bincount( (self._num_classes + 1) * b_pred.reshape(-1) + b_gt.reshape(-1), minlength=self._conf_matrix.size, ).reshape(self._conf_matrix.shape) self._predictions.extend(self.encode_json_sem_seg(pred, input["file_name"])) def process(self, inputs, outputs): """ Args: inputs: the inputs to a model. It is a list of dicts. Each dict corresponds to an image and contains keys like "height", "width", "file_name". outputs: the outputs of a model. It is either list of semantic segmentation predictions (Tensor [H, W]) or list of dicts with key "sem_seg" that contains semantic segmentation prediction in the same format. 
""" for input, output in zip(inputs, outputs): output = output["sem_seg"].argmax(dim=0).to(self._cpu_device) pred = np.array(output, dtype=int) gt_filename = self.input_file_to_gt_file[input["file_name"]] gt = self.sem_seg_loading_fn(gt_filename, dtype=int) # transfer to supercategory # self._num_classes = 28 _gt = self.transfer(gt) _gt[_gt == self._ignore_label] = 16#self._num_classes # here for hungarian_matching self.do_hangarain_mapping(coco_results=pred, gt=_gt, save_path='/home/niudt/u2seg_test/detectron2/tools/seg.json') # # transfer to supercategory # # self._num_classes = 28 # gt = self.transfer(gt) # gt[gt == self._ignore_label] = 16 # self._num_classes # mapping_dict = json.load(open('/home/niudt/detectron2/tools/3x_800_cocotrain_test/semantic_mapping.json')) # for cls in mapping_dict: # # cls = int(_cls) # if mapping_dict[cls] == -1: # pred[pred == int(cls)] = self._num_classes # else: # pred[pred == int(cls)] = mapping_dict[cls] self._conf_matrix += np.bincount( (self._num_classes + 1) * pred.reshape(-1) + gt.reshape(-1), minlength=self._conf_matrix.size, ).reshape(self._conf_matrix.shape) if self._compute_boundary_iou: b_gt = self._mask_to_boundary(gt.astype(np.uint8)) b_pred = self._mask_to_boundary(pred.astype(np.uint8)) self._b_conf_matrix += np.bincount( (self._num_classes + 1) * b_pred.reshape(-1) + b_gt.reshape(-1), minlength=self._conf_matrix.size, ).reshape(self._conf_matrix.shape) self._predictions.extend(self.encode_json_sem_seg(pred, input["file_name"])) def evaluate(self): """ Evaluates standard semantic segmentation metrics (http://cocodataset.org/#stuff-eval): * Mean intersection-over-union averaged across classes (mIoU) * Frequency Weighted IoU (fwIoU) * Mean pixel accuracy averaged across classes (mACC) * Pixel Accuracy (pACC) """ mapping_dict = self.hungarain_matching(all_preds=np.array(self.pred_det_cate), all_targets=np.array(self.pseudo_gt_cate), num_classes=15, num_labeled=27) print(mapping_dict) # # # save the mapping dict save_root = '/home/niudt/u2seg_test/detectron2/tools/hungarain_matching/cocotrain_300' os.makedirs(save_root,exist_ok=True) save_path = os.path.join(save_root, 'semantic_mapping.json') with open(save_path, 'w', encoding='utf-8') as f: json.dump(mapping_dict, f, ensure_ascii=False) assert 1 == 0 if self._distributed: synchronize()
# Copyright (c) Facebook, Inc. and its affiliates. _CV2_IMPORTED = True try: except ImportError: # OpenCV is an optional dependency at the moment _CV2_IMPORTED = False def load_image_into_numpy_array( filename: str, copy: bool = False, dtype: Optional[Union[np.dtype, str]] = None, ) -> np.ndarray: with PathManager.open(filename, "rb") as f: array = np.array(Image.open(f), copy=copy, dtype=dtype) return array class SemSegEvaluator(DatasetEvaluator): """ Evaluate semantic segmentation metrics. """ def __init__( self, dataset_name, distributed=True, output_dir=None, *, sem_seg_loading_fn=load_image_into_numpy_array, num_classes=None, ignore_label=None, ): """ Args: dataset_name (str): name of the dataset to be evaluated. distributed (bool): if True, will collect results from all ranks for evaluation. Otherwise, will evaluate the results in the current process. output_dir (str): an output directory to dump results. sem_seg_loading_fn: function to read sem seg file and load into numpy array. Default provided, but projects can customize. num_classes, ignore_label: deprecated argument """ self._logger = logging.getLogger(__name__) if num_classes is not None: self._logger.warn( "SemSegEvaluator(num_classes) is deprecated! It should be obtained from metadata." ) if ignore_label is not None: self._logger.warn( "SemSegEvaluator(ignore_label) is deprecated! It should be obtained from metadata." ) self._dataset_name = dataset_name self._distributed = distributed self._output_dir = output_dir self._cpu_device = torch.device("cpu") self.input_file_to_gt_file = { dataset_record["file_name"]: dataset_record["sem_seg_file_name"] for dataset_record in DatasetCatalog.get(dataset_name) } meta = MetadataCatalog.get(dataset_name) # Dict that maps contiguous training ids to COCO category ids try: c2d = meta.stuff_dataset_id_to_contiguous_id self._contiguous_id_to_dataset_id = {v: k for k, v in c2d.items()} except AttributeError: self._contiguous_id_to_dataset_id = None self._class_names = meta.stuff_classes self.sem_seg_loading_fn = sem_seg_loading_fn self._num_classes = len(meta.stuff_classes) if num_classes is not None: assert self._num_classes == num_classes, f"{self._num_classes} != {num_classes}" self._ignore_label = ignore_label if ignore_label is not None else meta.ignore_label # This is because cv2.erode did not work for int datatype. Only works for uint8. self._compute_boundary_iou = True if not _CV2_IMPORTED: self._compute_boundary_iou = False self._logger.warn( """Boundary IoU calculation requires OpenCV. B-IoU metrics are not going to be computed because OpenCV is not available to import.""" ) if self._num_classes >= np.iinfo(np.uint8).max: self._compute_boundary_iou = False self._logger.warn( f"""SemSegEvaluator(num_classes) is more than supported value for Boundary IoU calculation! B-IoU metrics are not going to be computed. Max allowed value (exclusive) for num_classes for calculating Boundary IoU is {np.iinfo(np.uint8).max}. 
The number of classes of dataset {self._dataset_name} is {self._num_classes}""" ) def reset(self): self._conf_matrix = np.zeros((self._num_classes + 1, self._num_classes + 1), dtype=np.int64) self._b_conf_matrix = np.zeros( (self._num_classes + 1, self._num_classes + 1), dtype=np.int64 ) self._predictions = [] def do_hangarain_mapping(self, coco_results, gt): # here do the hungarian matching # create gt mapping dict # dataset_id_to_contiguous_id = self._metadata.thing_dataset_id_to_contiguous_id # gt_cate_mapping = {k: v for k, v in dataset_id_to_contiguous_id.items()} # do the box matching based computing IOU, create pseudo gt pred_num_mask = np.unique(coco_results) gt_num_mask = np.unique(gt) for pred in pred_num_mask: if pred == 0: continue mask_pred = (coco_results == pred) for _gt in gt_num_mask: if _gt == 0 or _gt == 255: continue mask_gt = (gt == _gt) iou = np.sum((mask_pred * mask_gt)) / np.sum((mask_pred + mask_gt)) if iou > 0.45: # TODO: find that thresh self.pseudo_gt_cate.append(_gt) self.pred_det_cate.append(pred) continue def process_cityscapes(self, inputs, outputs): """ Args: inputs: the inputs to a model. It is a list of dicts. Each dict corresponds to an image and contains keys like "height", "width", "file_name". outputs: the outputs of a model. It is either list of semantic segmentation predictions (Tensor [H, W]) or list of dicts with key "sem_seg" that contains semantic segmentation prediction in the same format. """ for input, output in zip(inputs, outputs): output = output["sem_seg"].argmax(dim=0).to(self._cpu_device) pred = np.array(output, dtype=int) gt_filename = self.input_file_to_gt_file[input["file_name"]] gt = self.sem_seg_loading_fn(gt_filename, dtype=int) # here for hungarian_matching self.do_hangarain_mapping(coco_results=pred, gt=gt) # this is transfer the pred to # # # transfer to supercategory # gt[gt == self._ignore_label] = self._num_classes # mapping_dict = json.load( # open('/home/niudt/detectron2/tools/hungarain_matching/cocotrain_300/semantic_mapping.json')) # for cls in mapping_dict: # # cls = int(_cls) # if mapping_dict[cls] == -1: # pred[pred == int(cls)] = 0 # self._num_classes # else: # pred[pred == int(cls)] = mapping_dict[cls] self._conf_matrix += np.bincount( (self._num_classes + 1) * pred.reshape(-1) + gt.reshape(-1), minlength=self._conf_matrix.size, ).reshape(self._conf_matrix.shape) if self._compute_boundary_iou: b_gt = self._mask_to_boundary(gt.astype(np.uint8)) b_pred = self._mask_to_boundary(pred.astype(np.uint8)) self._b_conf_matrix += np.bincount( (self._num_classes + 1) * b_pred.reshape(-1) + b_gt.reshape(-1), minlength=self._conf_matrix.size, ).reshape(self._conf_matrix.shape) self._predictions.extend(self.encode_json_sem_seg(pred, input["file_name"])) def process(self, inputs, outputs): """ Args: inputs: the inputs to a model. It is a list of dicts. Each dict corresponds to an image and contains keys like "height", "width", "file_name". outputs: the outputs of a model. It is either list of semantic segmentation predictions (Tensor [H, W]) or list of dicts with key "sem_seg" that contains semantic segmentation prediction in the same format. 
""" for input, output in zip(inputs, outputs): output = output["sem_seg"].argmax(dim=0).to(self._cpu_device) pred = np.array(output, dtype=int) gt_filename = self.input_file_to_gt_file[input["file_name"]] gt = self.sem_seg_loading_fn(gt_filename, dtype=int) # transfer to supercategory # self._num_classes = 28 _gt = self.transfer(gt) _gt[_gt == self._ignore_label] = 16#self._num_classes # here for hungarian_matching self.do_hangarain_mapping(coco_results=pred, gt=_gt, save_path='/home/niudt/u2seg_test/detectron2/tools/seg.json') # # transfer to supercategory # # self._num_classes = 28 # gt = self.transfer(gt) # gt[gt == self._ignore_label] = 16 # self._num_classes # mapping_dict = json.load(open('/home/niudt/detectron2/tools/3x_800_cocotrain_test/semantic_mapping.json')) # for cls in mapping_dict: # # cls = int(_cls) # if mapping_dict[cls] == -1: # pred[pred == int(cls)] = self._num_classes # else: # pred[pred == int(cls)] = mapping_dict[cls] self._conf_matrix += np.bincount( (self._num_classes + 1) * pred.reshape(-1) + gt.reshape(-1), minlength=self._conf_matrix.size, ).reshape(self._conf_matrix.shape) if self._compute_boundary_iou: b_gt = self._mask_to_boundary(gt.astype(np.uint8)) b_pred = self._mask_to_boundary(pred.astype(np.uint8)) self._b_conf_matrix += np.bincount( (self._num_classes + 1) * b_pred.reshape(-1) + b_gt.reshape(-1), minlength=self._conf_matrix.size, ).reshape(self._conf_matrix.shape) self._predictions.extend(self.encode_json_sem_seg(pred, input["file_name"])) def evaluate(self): """ Evaluates standard semantic segmentation metrics (http://cocodataset.org/#stuff-eval): * Mean intersection-over-union averaged across classes (mIoU) * Frequency Weighted IoU (fwIoU) * Mean pixel accuracy averaged across classes (mACC) * Pixel Accuracy (pACC) """ mapping_dict = self.hungarain_matching(all_preds=np.array(self.pred_det_cate), all_targets=np.array(self.pseudo_gt_cate), num_classes=15, num_labeled=27) print(mapping_dict) # # # save the mapping dict save_root = '/home/niudt/u2seg_test/detectron2/tools/hungarain_matching/cocotrain_300' os.makedirs(save_root,exist_ok=True) save_path = os.path.join(save_root, 'semantic_mapping.json') with open(save_path, 'w', encoding='utf-8') as f: json.dump(mapping_dict, f, ensure_ascii=False) assert 1 == 0 if self._distributed: synchronize()
conf_matrix_list = all_gather(self._conf_matrix)
1
2023-12-05 01:13:31+00:00
8k
upfusion3d/upfusion
diffusion/pipeline_control_net.py
[ { "identifier": "create_model", "path": "control_net/cldm/model.py", "snippet": "def create_model(config_path):\n config = OmegaConf.load(config_path)\n model = instantiate_from_config(config.model).cpu()\n print(f'Loaded model config from [{config_path}]')\n return model" }, { "identifier": "load_state_dict", "path": "control_net/cldm/model.py", "snippet": "def load_state_dict(ckpt_path, location='cpu'):\n _, extension = os.path.splitext(ckpt_path)\n if extension.lower() == \".safetensors\":\n import safetensors.torch\n state_dict = safetensors.torch.load_file(ckpt_path, device=location)\n else:\n state_dict = get_state_dict(torch.load(ckpt_path, map_location=torch.device(location)))\n state_dict = get_state_dict(state_dict)\n print(f'Loaded state_dict from [{ckpt_path}]')\n return state_dict" }, { "identifier": "DDIMSampler", "path": "control_net/ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. 
- ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...\n dynamic_threshold=None,\n ucg_schedule=None,\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list): ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n elif isinstance(conditioning, list):\n for ctmp in conditioning:\n if ctmp.shape[0] != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n ucg_schedule=ucg_schedule,\n disable_tdqm=kwargs.get(\"disable_tdqm\", False),\n cfg_type=kwargs.get(\"cfg_type\", None)\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,\n ucg_schedule=None, disable_tdqm=False, cfg_type=None):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} 
timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps, disable=disable_tdqm)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. - mask) * img\n\n if ucg_schedule is not None:\n raise RuntimeError(\"not supported since this may mess up the new cfg logic\")\n assert len(ucg_schedule) == len(time_range)\n unconditional_guidance_scale = ucg_schedule[i]\n\n outs = self.p_sample_ddim_v2(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold, cfg_type=cfg_type)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,\n dynamic_threshold=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n model_output = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if (isinstance(c[k], list)) and (type(c[k][0]) is not PerspectiveCameras):\n c_in[k] = [torch.cat([\n unconditional_conditioning[k][i],\n c[k][i]]) for i in range(len(c[k]))]\n elif (isinstance(c[k], list)) and ((type(c[k][0]) is PerspectiveCameras) or (c[k][0] is None)):\n c_in[k] = unconditional_conditioning[k] + c[k]\n else:\n c_in[k] = torch.cat([\n unconditional_conditioning[k],\n c[k]])\n elif isinstance(c, list):\n c_in = list()\n assert isinstance(unconditional_conditioning, list)\n for i in range(len(c)):\n c_in.append(torch.cat([unconditional_conditioning[i], c[i]]))\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n model_uncond, model_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond)\n\n if self.model.parameterization == \"v\":\n e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)\n else:\n e_t = model_output\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\", 'not implemented'\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters 
corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n if self.model.parameterization != \"v\":\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n else:\n pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n raise NotImplementedError()\n\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def p_sample_ddim_v2(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,\n dynamic_threshold=None, cfg_type=None):\n # NOTE: v2 is a custom version so that modifications can be made more easily\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n model_output = self.model.apply_model(x, t, c)\n else:\n if not isinstance(c, dict):\n raise RuntimeError(\"Not supported!\")\n\n # For cfg_type \"legacy\" or \"F1\"\n if isinstance(unconditional_conditioning, dict):\n c_in = dict()\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n for k in c:\n if (isinstance(c[k], list)) and (type(c[k][0]) is not PerspectiveCameras):\n c_in[k] = [torch.cat([\n unconditional_conditioning[k][i],\n c[k][i]]) for i in range(len(c[k]))]\n elif (isinstance(c[k], list)) and ((type(c[k][0]) is PerspectiveCameras) or (c[k][0] is None)):\n c_in[k] = unconditional_conditioning[k] + c[k]\n elif (isinstance(c[k], torch.Tensor)):\n c_in[k] = torch.cat([unconditional_conditioning[k], c[k]], dim=0)\n else:\n raise RuntimeError(\"Not supported!\")\n\n model_uncond, model_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond)\n\n elif isinstance(unconditional_conditioning, list):\n raise ValueError\n\n else:\n raise RuntimeError(\"Not supported!\")\n\n if self.model.parameterization == \"v\":\n e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)\n else:\n e_t = model_output\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\", 'not implemented'\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), 
alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n if self.model.parameterization != \"v\":\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n else:\n pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n raise NotImplementedError()\n\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None,\n unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None):\n raise RuntimeError(\"Function supported since the new cfg logic is not incorporated here\")\n num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0]\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc='Encoding Image'):\n t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long)\n if unconditional_guidance_scale == 1.:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c))), 2)\n noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond)\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = alphas_next[i].sqrt() * (\n (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred\n x_next = xt_weighted + weighted_noise_pred\n if return_intermediates and i % (\n num_steps // return_intermediates) == 0 and i < num_steps - 1:\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n if callback: callback(i)\n\n out = {'x_encoded': x_next, 'intermediate_steps': inter_steps}\n if return_intermediates:\n out.update({'intermediates': intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)\n\n @torch.no_grad()\n def 
decode(\n self, x_latent, cond, t_start, cfg_type=None,\n unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False, callback=None\n ):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n # print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps, disable=True)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long)\n x_dec, _ = self.p_sample_ddim_v2(x_dec, cond, ts, index=index, use_original_steps=use_original_steps, \n cfg_type=cfg_type, unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning\n )\n if callback: callback(i)\n return x_dec" } ]
import torch
import torch.nn as nn
from control_net.cldm.model import create_model, load_state_dict
from control_net.ldm.models.diffusion.ddim import DDIMSampler
5,730
class DiffusionPipelineCN(nn.Module):
    def __init__(self, cfg, srt_model=None, dino_model=None):
        super().__init__()
        self.cfg = cfg
        self.control_net_model_config_path = self.cfg.control_net_model_config_path
        self.prompt_color = self.cfg.control_net_prompt_color
        self._setup_model()
        self.srt_model = srt_model
        self.dino_model = dino_model
        self.cond_type = self.cfg.cond_type
        if self.cond_type == "DF":
            self._create_batch_dict_fn = self._create_batch_dict_df
            self._maybe_dropout_condition_fn = self._maybe_dropout_condition_df
        elif self.cond_type == "SLT":
            self._create_batch_dict_fn = self._create_batch_dict_slt
            self._maybe_dropout_condition_fn = self._maybe_dropout_condition_slt
        elif self.cond_type == "DF+SLT":
            self._create_batch_dict_fn = self._create_batch_dict_dfslt
            self._maybe_dropout_condition_fn = self._maybe_dropout_condition_dfslt
        else:
            raise ValueError

    def _setup_model(self):
class DiffusionPipelineCN(nn.Module):
    def __init__(self, cfg, srt_model=None, dino_model=None):
        super().__init__()
        self.cfg = cfg
        self.control_net_model_config_path = self.cfg.control_net_model_config_path
        self.prompt_color = self.cfg.control_net_prompt_color
        self._setup_model()
        self.srt_model = srt_model
        self.dino_model = dino_model
        self.cond_type = self.cfg.cond_type
        if self.cond_type == "DF":
            self._create_batch_dict_fn = self._create_batch_dict_df
            self._maybe_dropout_condition_fn = self._maybe_dropout_condition_df
        elif self.cond_type == "SLT":
            self._create_batch_dict_fn = self._create_batch_dict_slt
            self._maybe_dropout_condition_fn = self._maybe_dropout_condition_slt
        elif self.cond_type == "DF+SLT":
            self._create_batch_dict_fn = self._create_batch_dict_dfslt
            self._maybe_dropout_condition_fn = self._maybe_dropout_condition_dfslt
        else:
            raise ValueError

    def _setup_model(self):
model = create_model(self.cfg.control_net_model_config_path).cpu()
0
2023-12-12 00:49:11+00:00
8k
modelscope/normal-depth-diffusion
libs/omnidata_torch/lib/midas_31/api.py
[ { "identifier": "DPTDepthModel", "path": "libs/omnidata_torch/lib/midas_31/midas/dpt_depth.py", "snippet": "class DPTDepthModel(DPT):\n def __init__(self, path=None, non_negative=True, **kwargs):\n features = kwargs[\"features\"] if \"features\" in kwargs else 256\n head_features_1 = kwargs[\"head_features_1\"] if \"head_features_1\" in kwargs else features\n head_features_2 = kwargs[\"head_features_2\"] if \"head_features_2\" in kwargs else 32\n kwargs.pop(\"head_features_1\", None)\n kwargs.pop(\"head_features_2\", None)\n\n head = nn.Sequential(\n nn.Conv2d(head_features_1, head_features_1 // 2, kernel_size=3, stride=1, padding=1),\n Interpolate(scale_factor=2, mode=\"bilinear\", align_corners=True),\n nn.Conv2d(head_features_1 // 2, head_features_2, kernel_size=3, stride=1, padding=1),\n nn.ReLU(True),\n nn.Conv2d(head_features_2, 1, kernel_size=1, stride=1, padding=0),\n nn.ReLU(True) if non_negative else nn.Identity(),\n nn.Identity(),\n )\n\n super().__init__(head, **kwargs)\n\n if path is not None:\n self.load(path)\n\n def forward(self, x):\n return super().forward(x).squeeze(dim=1)" }, { "identifier": "MidasNet", "path": "libs/omnidata_torch/lib/midas_31/midas/midas_net.py", "snippet": "class MidasNet(BaseModel):\n \"\"\"Network for monocular depth estimation.\n \"\"\"\n\n def __init__(self, path=None, features=256, non_negative=True):\n \"\"\"Init.\n\n Args:\n path (str, optional): Path to saved model. Defaults to None.\n features (int, optional): Number of features. Defaults to 256.\n backbone (str, optional): Backbone network for encoder. Defaults to resnet50\n \"\"\"\n print(\"Loading weights: \", path)\n\n super(MidasNet, self).__init__()\n\n use_pretrained = False if path is None else True\n\n self.pretrained, self.scratch = _make_encoder(backbone=\"resnext101_wsl\", features=features, use_pretrained=use_pretrained)\n\n self.scratch.refinenet4 = FeatureFusionBlock(features)\n self.scratch.refinenet3 = FeatureFusionBlock(features)\n self.scratch.refinenet2 = FeatureFusionBlock(features)\n self.scratch.refinenet1 = FeatureFusionBlock(features)\n\n self.scratch.output_conv = nn.Sequential(\n nn.Conv2d(features, 128, kernel_size=3, stride=1, padding=1),\n Interpolate(scale_factor=2, mode=\"bilinear\"),\n nn.Conv2d(128, 32, kernel_size=3, stride=1, padding=1),\n nn.ReLU(True),\n nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),\n nn.ReLU(True) if non_negative else nn.Identity(),\n )\n\n if path:\n self.load(path)\n\n def forward(self, x):\n \"\"\"Forward pass.\n\n Args:\n x (tensor): input data (image)\n\n Returns:\n tensor: depth\n \"\"\"\n\n layer_1 = self.pretrained.layer1(x)\n layer_2 = self.pretrained.layer2(layer_1)\n layer_3 = self.pretrained.layer3(layer_2)\n layer_4 = self.pretrained.layer4(layer_3)\n\n layer_1_rn = self.scratch.layer1_rn(layer_1)\n layer_2_rn = self.scratch.layer2_rn(layer_2)\n layer_3_rn = self.scratch.layer3_rn(layer_3)\n layer_4_rn = self.scratch.layer4_rn(layer_4)\n\n path_4 = self.scratch.refinenet4(layer_4_rn)\n path_3 = self.scratch.refinenet3(path_4, layer_3_rn)\n path_2 = self.scratch.refinenet2(path_3, layer_2_rn)\n path_1 = self.scratch.refinenet1(path_2, layer_1_rn)\n\n out = self.scratch.output_conv(path_1)\n\n return torch.squeeze(out, dim=1)" }, { "identifier": "MidasNet_small", "path": "libs/omnidata_torch/lib/midas_31/midas/midas_net_custom.py", "snippet": "class MidasNet_small(BaseModel):\n \"\"\"Network for monocular depth estimation.\n \"\"\"\n\n def __init__(self, path=None, features=64, backbone=\"efficientnet_lite3\", 
non_negative=True, exportable=True, channels_last=False, align_corners=True,\n blocks={'expand': True}):\n \"\"\"Init.\n\n Args:\n path (str, optional): Path to saved model. Defaults to None.\n features (int, optional): Number of features. Defaults to 256.\n backbone (str, optional): Backbone network for encoder. Defaults to resnet50\n \"\"\"\n print(\"Loading weights: \", path)\n\n super(MidasNet_small, self).__init__()\n\n use_pretrained = False if path else True\n \n self.channels_last = channels_last\n self.blocks = blocks\n self.backbone = backbone\n\n self.groups = 1\n\n features1=features\n features2=features\n features3=features\n features4=features\n self.expand = False\n if \"expand\" in self.blocks and self.blocks['expand'] == True:\n self.expand = True\n features1=features\n features2=features*2\n features3=features*4\n features4=features*8\n\n self.pretrained, self.scratch = _make_encoder(self.backbone, features, use_pretrained, groups=self.groups, expand=self.expand, exportable=exportable)\n \n self.scratch.activation = nn.ReLU(False) \n\n self.scratch.refinenet4 = FeatureFusionBlock_custom(features4, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners)\n self.scratch.refinenet3 = FeatureFusionBlock_custom(features3, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners)\n self.scratch.refinenet2 = FeatureFusionBlock_custom(features2, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners)\n self.scratch.refinenet1 = FeatureFusionBlock_custom(features1, self.scratch.activation, deconv=False, bn=False, align_corners=align_corners)\n\n \n self.scratch.output_conv = nn.Sequential(\n nn.Conv2d(features, features//2, kernel_size=3, stride=1, padding=1, groups=self.groups),\n Interpolate(scale_factor=2, mode=\"bilinear\"),\n nn.Conv2d(features//2, 32, kernel_size=3, stride=1, padding=1),\n self.scratch.activation,\n nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),\n nn.ReLU(True) if non_negative else nn.Identity(),\n nn.Identity(),\n )\n \n if path:\n self.load(path)\n\n\n def forward(self, x):\n \"\"\"Forward pass.\n\n Args:\n x (tensor): input data (image)\n\n Returns:\n tensor: depth\n \"\"\"\n if self.channels_last==True:\n print(\"self.channels_last = \", self.channels_last)\n x.contiguous(memory_format=torch.channels_last)\n\n\n layer_1 = self.pretrained.layer1(x)\n layer_2 = self.pretrained.layer2(layer_1)\n layer_3 = self.pretrained.layer3(layer_2)\n layer_4 = self.pretrained.layer4(layer_3)\n \n layer_1_rn = self.scratch.layer1_rn(layer_1)\n layer_2_rn = self.scratch.layer2_rn(layer_2)\n layer_3_rn = self.scratch.layer3_rn(layer_3)\n layer_4_rn = self.scratch.layer4_rn(layer_4)\n\n\n path_4 = self.scratch.refinenet4(layer_4_rn)\n path_3 = self.scratch.refinenet3(path_4, layer_3_rn)\n path_2 = self.scratch.refinenet2(path_3, layer_2_rn)\n path_1 = self.scratch.refinenet1(path_2, layer_1_rn)\n \n out = self.scratch.output_conv(path_1)\n\n return torch.squeeze(out, dim=1)" }, { "identifier": "Resize", "path": "libs/omnidata_torch/lib/midas_31/midas/transforms.py", "snippet": "class Resize(object):\n \"\"\"Resize sample to given size (width, height).\n \"\"\"\n\n def __init__(\n self,\n width,\n height,\n resize_target=True,\n keep_aspect_ratio=False,\n ensure_multiple_of=1,\n resize_method=\"lower_bound\",\n image_interpolation_method=cv2.INTER_AREA,\n ):\n \"\"\"Init.\n\n Args:\n width (int): desired output width\n height (int): desired 
output height\n resize_target (bool, optional):\n True: Resize the full sample (image, mask, target).\n False: Resize image only.\n Defaults to True.\n keep_aspect_ratio (bool, optional):\n True: Keep the aspect ratio of the input sample.\n Output sample might not have the given width and height, and\n resize behaviour depends on the parameter 'resize_method'.\n Defaults to False.\n ensure_multiple_of (int, optional):\n Output width and height is constrained to be multiple of this parameter.\n Defaults to 1.\n resize_method (str, optional):\n \"lower_bound\": Output will be at least as large as the given size.\n \"upper_bound\": Output will be at max as large as the given size. (Output size might be smaller than given size.)\n \"minimal\": Scale as least as possible. (Output size might be smaller than given size.)\n Defaults to \"lower_bound\".\n \"\"\"\n self.__width = width\n self.__height = height\n\n self.__resize_target = resize_target\n self.__keep_aspect_ratio = keep_aspect_ratio\n self.__multiple_of = ensure_multiple_of\n self.__resize_method = resize_method\n self.__image_interpolation_method = image_interpolation_method\n\n def constrain_to_multiple_of(self, x, min_val=0, max_val=None):\n y = (np.round(x / self.__multiple_of) * self.__multiple_of).astype(int)\n\n if max_val is not None and y > max_val:\n y = (np.floor(x / self.__multiple_of) * self.__multiple_of).astype(int)\n\n if y < min_val:\n y = (np.ceil(x / self.__multiple_of) * self.__multiple_of).astype(int)\n\n return y\n\n def get_size(self, width, height):\n # determine new height and width\n scale_height = self.__height / height\n scale_width = self.__width / width\n\n if self.__keep_aspect_ratio:\n if self.__resize_method == \"lower_bound\":\n # scale such that output size is lower bound\n if scale_width > scale_height:\n # fit width\n scale_height = scale_width\n else:\n # fit height\n scale_width = scale_height\n elif self.__resize_method == \"upper_bound\":\n # scale such that output size is upper bound\n if scale_width < scale_height:\n # fit width\n scale_height = scale_width\n else:\n # fit height\n scale_width = scale_height\n elif self.__resize_method == \"minimal\":\n # scale as least as possbile\n if abs(1 - scale_width) < abs(1 - scale_height):\n # fit width\n scale_height = scale_width\n else:\n # fit height\n scale_width = scale_height\n else:\n raise ValueError(\n f\"resize_method {self.__resize_method} not implemented\"\n )\n\n if self.__resize_method == \"lower_bound\":\n new_height = self.constrain_to_multiple_of(\n scale_height * height, min_val=self.__height\n )\n new_width = self.constrain_to_multiple_of(\n scale_width * width, min_val=self.__width\n )\n elif self.__resize_method == \"upper_bound\":\n new_height = self.constrain_to_multiple_of(\n scale_height * height, max_val=self.__height\n )\n new_width = self.constrain_to_multiple_of(\n scale_width * width, max_val=self.__width\n )\n elif self.__resize_method == \"minimal\":\n new_height = self.constrain_to_multiple_of(scale_height * height)\n new_width = self.constrain_to_multiple_of(scale_width * width)\n else:\n raise ValueError(f\"resize_method {self.__resize_method} not implemented\")\n\n return (new_width, new_height)\n\n def __call__(self, sample):\n width, height = self.get_size(\n sample[\"image\"].shape[1], sample[\"image\"].shape[0]\n )\n\n # resize sample\n sample[\"image\"] = cv2.resize(\n sample[\"image\"],\n (width, height),\n interpolation=self.__image_interpolation_method,\n )\n\n if self.__resize_target:\n if \"disparity\" in 
sample:\n sample[\"disparity\"] = cv2.resize(\n sample[\"disparity\"],\n (width, height),\n interpolation=cv2.INTER_NEAREST,\n )\n\n if \"depth\" in sample:\n sample[\"depth\"] = cv2.resize(\n sample[\"depth\"], (width, height), interpolation=cv2.INTER_NEAREST\n )\n\n sample[\"mask\"] = cv2.resize(\n sample[\"mask\"].astype(np.float32),\n (width, height),\n interpolation=cv2.INTER_NEAREST,\n )\n sample[\"mask\"] = sample[\"mask\"].astype(bool)\n\n return sample" }, { "identifier": "NormalizeImage", "path": "libs/omnidata_torch/lib/midas_31/midas/transforms.py", "snippet": "class NormalizeImage(object):\n \"\"\"Normlize image by given mean and std.\n \"\"\"\n\n def __init__(self, mean, std):\n self.__mean = mean\n self.__std = std\n\n def __call__(self, sample):\n # [0,1] ->[-0.5,0.5]->[-1,1]\n sample[\"image\"] = (sample[\"image\"] - self.__mean) / self.__std\n\n return sample" }, { "identifier": "PrepareForNet", "path": "libs/omnidata_torch/lib/midas_31/midas/transforms.py", "snippet": "class PrepareForNet(object):\n \"\"\"Prepare sample for usage as network input.\n \"\"\"\n\n def __init__(self):\n pass\n\n def __call__(self, sample):\n image = np.transpose(sample[\"image\"], (2, 0, 1))\n sample[\"image\"] = np.ascontiguousarray(image).astype(np.float32)\n\n if \"mask\" in sample:\n sample[\"mask\"] = sample[\"mask\"].astype(np.float32)\n sample[\"mask\"] = np.ascontiguousarray(sample[\"mask\"])\n\n if \"disparity\" in sample:\n disparity = sample[\"disparity\"].astype(np.float32)\n sample[\"disparity\"] = np.ascontiguousarray(disparity)\n\n if \"depth\" in sample:\n depth = sample[\"depth\"].astype(np.float32)\n sample[\"depth\"] = np.ascontiguousarray(depth)\n\n return sample" } ]
import cv2
import os
import torch
import torch.nn as nn
from torchvision.transforms import Compose
from .midas.dpt_depth import DPTDepthModel
from .midas.midas_net import MidasNet
from .midas.midas_net_custom import MidasNet_small
from .midas.transforms import Resize, NormalizeImage, PrepareForNet
from basicsr.utils.download_util import load_file_from_url
4,170
# based on https://github.com/isl-org/MiDaS

annotator_ckpts_path = './libs/omnidata_torch/pretrained_models/'

ISL_PATHS = {
    "dpt_beit_large_512": os.path.join(annotator_ckpts_path, "dpt_beit_large_512.pt"),
    "dpt_beit_large_384": os.path.join(annotator_ckpts_path, "dpt_beit_large_384.pt"),
    "dpt_large": os.path.join(annotator_ckpts_path, "dpt_large-midas-2f21e586.pt"),
    "dpt_hybrid": os.path.join(annotator_ckpts_path, "dpt_hybrid-midas-501f0c75.pt"),
}

remote_model_path = "https://huggingface.co/lllyasviel/Annotators/resolve/main/dpt_hybrid-midas-501f0c75.pt"


def disabled_train(self, mode=True):
    """Overwrite model.train with this function to make sure train/eval mode does not change anymore."""
    return self


def load_midas_transform(model_type):
    # https://github.com/isl-org/MiDaS/blob/master/run.py
    # load transform only
    if model_type == "dpt_large":  # DPT-Large
        net_w, net_h = 384, 384
        resize_mode = "minimal"
# based on https://github.com/isl-org/MiDaS

annotator_ckpts_path = './libs/omnidata_torch/pretrained_models/'

ISL_PATHS = {
    "dpt_beit_large_512": os.path.join(annotator_ckpts_path, "dpt_beit_large_512.pt"),
    "dpt_beit_large_384": os.path.join(annotator_ckpts_path, "dpt_beit_large_384.pt"),
    "dpt_large": os.path.join(annotator_ckpts_path, "dpt_large-midas-2f21e586.pt"),
    "dpt_hybrid": os.path.join(annotator_ckpts_path, "dpt_hybrid-midas-501f0c75.pt"),
}

remote_model_path = "https://huggingface.co/lllyasviel/Annotators/resolve/main/dpt_hybrid-midas-501f0c75.pt"


def disabled_train(self, mode=True):
    """Overwrite model.train with this function to make sure train/eval mode does not change anymore."""
    return self


def load_midas_transform(model_type):
    # https://github.com/isl-org/MiDaS/blob/master/run.py
    # load transform only
    if model_type == "dpt_large":  # DPT-Large
        net_w, net_h = 384, 384
        resize_mode = "minimal"
normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
4
2023-12-06 07:29:34+00:00
8k
facebookresearch/DCI
reproduction/crowdsourcing/annotate/preprocessing/preprocess_assets_segev.py
[ { "identifier": "get_groups_simple", "path": "reproduction/crowdsourcing/annotate/preprocessing/mask_creation_utils.py", "snippet": "TARGET_STEP = 100\nSKIP_LOGGING = True\nclass GroupItem(TypedDict):\nclass FinalGroup(TypedDict):\ndef jitter(size: float) -> float:\ndef bound(v, lo, hi):\ndef _load_final_group_from_json(json_dict) -> FinalGroup:\ndef load_final_group_from_json(json_dict) -> FinalGrouping:\ndef get_grid(\n step: int, \n top_left: Point, \n bottom_right: Point, \n noise: Optional[float] = None\n) -> List[Point]:\ndef get_missing_points_greedy(mask: np.ndarray, min_size: int) -> List[Point]:\ndef get_points_from_canny_greedy(\n image: np.ndarray, \n distance_threshold: int = 40, \n jitter_amount: int = 40,\n num_extra: int = 3,\n) -> List[Point]:\ndef predict_all(\n predictor: \"SamPredictor\", \n image: np.ndarray, \n step: int = TARGET_STEP, \n top_left: Optional[Point] = None, \n bottom_right: Optional[Point] = None, \n containing_mask: Optional[np.ndarray] = None\n) -> Dict[Point, List[EfficientMask]]:\ndef predict_for_points(\n predictor: \"SamPredictor\", \n points: List[Point],\n) -> Dict[Point, List[EfficientMask]]:\ndef predict_for_bounded_points(\n predictor: \"SamPredictor\", \n image: np.ndarray, \n points: List[Point], \n mask: EfficientMask,\n) -> Dict[Point, List[EfficientMask]]:\ndef get_canny_masks(\n predictor: \"SamPredictor\", \n image: np.ndarray, \n distance_threshold: int = 40, \n jitter_amount: int = 40\n):\ndef process_best_largest(\n results: Dict[Point, List[EfficientMask]], \n penalty_gap: float = 0.2,\n) -> Dict[Point, Dict[MaskMergeKey, EfficientMask]]:\ndef get_groups(\n processed_results: Dict[Point, Dict[MaskMergeKey, EfficientMask]], \n merge_key: MaskMergeKey = 'best', \n groups: Optional[GroupDict] = None,\n) -> GroupDict:\ndef get_groups_simple(\n sam_results: List[EfficientMask],\n) -> FinalGrouping:\ndef print_groups(groups: FinalGrouping) -> None:\n def _get_group_map(curr_g: FinalGrouping) -> Dict[Union[int, str], Any]:\ndef refine_groups_simple(groups: FinalGrouping, merge_thresh = 0.03) -> FinalGrouping:\ndef first_iteration_groups(\n predictor: \"SamPredictor\",\n processed_results: Dict[Point, Dict[MaskMergeKey, EfficientMask]], \n step: int, \n merge_key: MaskMergeKey = \"largest\",\n) -> GroupDict:\ndef get_subgroup_mask_lists(\n groups: GroupDict, \n base_masks: Dict[Point, List[EfficientMask]], \n canny_masks: Dict[Point, List[EfficientMask]], \n score_cutoff: float = 0.7, \n retain_best: bool = False,\n) -> GroupDict:\ndef compute_subgroups(\n group_mask_item: GroupItem, \n contained_in_thresh: float = 0.90, \n outer_sim_thresh: float = 0.77, \n mutual_sim_thresh: float = 0.85, \n retain_best: bool = False,\n) -> GroupDict:\ndef add_points_in_mask(\n predictor: \"SamPredictor\", \n image: np.ndarray, \n item: GroupItem, \n score_cutoff: float = 0.7,\n num_points = 5,\n) -> GroupItem:\ndef compute_subgroup_recursively(\n predictor: \"SamPredictor\", \n image: np.ndarray, \n group_mask_item: GroupItem, \n score_cutoff: float = 0.7, \n contained_in_thresh: float = 0.90, \n outer_sim_thresh: float = 0.77, \n mutual_sim_thresh: float = 0.85, \n retain_best: bool = False, \n depth: int = 0,\n) -> FinalGroup:\ndef compute_group_tree(\n predictor: \"SamPredictor\", \n image: np.ndarray, \n score_cutoff: float = 0.7, \n contained_in_thresh: float = 0.9, \n outer_sim_thresh: float = 0.8, \n mutual_sim_thresh: float = 0.9, \n retain_best: bool = False,\n) -> FinalGrouping:" }, { "identifier": "EfficientMask", "path": 
"reproduction/crowdsourcing/annotate/preprocessing/efficient_mask.py", "snippet": "class EfficientMask():\n \"\"\"Class for more efficient mask mask over full numpy ndarrays\"\"\"\n def __init__(self, mask: np.ndarray, score: float, size: Optional[int] = None):\n self.mask = mask\n self.score = score\n self._size: Optional[int] = size\n self._tlbr: Optional[Tuple[Point, Point]] = None\n \n def __repr__(self) -> str:\n return f\"<EM : {self.get_size()}, {self.get_tlbr()}>\"\n \n def _reset_cache(self):\n self._tlbr = None\n self._size = None\n \n def set_to(self, other: \"EfficientMask\"):\n \"\"\"Set this mask's values to that of other\"\"\"\n self.mask = other.mask\n self.score = other.score\n self._size = other._size\n self._tlbr = other._tlbr\n \n def get_tlbr(self) -> Tuple[Point, Point]:\n \"\"\"Return the top left and bottom right bounds of this mask\"\"\"\n if self._tlbr is None:\n try:\n np_where = np.where(self.mask == True)\n left = np.min(np_where[1])\n right = np.max(np_where[1]) + 1\n top = np.min(np_where[0])\n bottom = np.max(np_where[0]) + 1\n except ValueError:\n top, left, bottom, right = (0, 0, 0, 0)\n self._tlbr = ((cast(Ydm, top), cast(Xdm, left)), (cast(Ydm, bottom), cast(Xdm, right)))\n return self._tlbr\n \n def get_size(self) -> int:\n \"\"\"Return the total number of true pixels in this mask\"\"\"\n if self._size is None:\n (top, left), (bottom, right) = self.get_tlbr()\n self._size = np.sum(self.mask[top:bottom,left:right]*1)\n return self._size\n \n def get_density(self) -> float:\n \"\"\"Provide rough density with number of pixels and bbox size\"\"\"\n size = self.get_size()\n (t, l), (b, r) = self.get_tlbr()\n area = (b-t) * (r-l) + 1\n return size / area\n \n def dense_score(self) -> float:\n \"\"\"Return the score times the density, a heuristic for quality\"\"\"\n return self.score * math.sqrt(self.get_density())\n \n def _bbox_overlaps(self, other: \"EfficientMask\") -> bool:\n \"\"\"Check points of opposite diagonals in each other bbox\"\"\"\n (t1, l1), (b1, r1) = self.get_tlbr()\n (t2, l2), (b2, r2) = other.get_tlbr()\n return (\n point_in_box(t1, l1, other.get_tlbr()) or \n point_in_box(b1, r1, other.get_tlbr()) or \n point_in_box(t2, r2, self.get_tlbr()) or \n point_in_box(b2, l2, self.get_tlbr()) \n )\n \n def _get_overlap_submask(self, other: \"EfficientMask\") -> np.ndarray:\n \"\"\"Get a classic ndarray of pixels in the overlap between this and other\"\"\"\n if not self._bbox_overlaps(other):\n return np.array([])\n (t1, l1), (b1, r1) = self.get_tlbr()\n (t2, l2), (b2, r2) = other.get_tlbr()\n maxt, maxl = max(t1, t2), max(l1, l2)\n minb, minr = min(b1, b2), min(r1, r2)\n return (self.mask[maxt:minb,maxl:minr]*1 + other.mask[maxt:minb,maxl:minr]*1 == 2)\n \n def _get_xor_submask(self, other: \"EfficientMask\") -> np.ndarray:\n \"\"\"Get a classic ndarray of pixels in the xor between this and other\"\"\"\n if not self._bbox_overlaps(other):\n return np.array([])\n (t1, l1), (b1, r1) = self.get_tlbr()\n (t2, l2), (b2, r2) = other.get_tlbr()\n mint, minl = min(t1, t2), min(l1, l2)\n maxb, maxr = max(b1, b2), max(r1, r2)\n return (self.mask[mint:maxb,minl:maxr]*1 + other.mask[mint:maxb,minl:maxr]*1 == 1)\n \n def intersect(self, other: \"EfficientMask\") -> \"EfficientMask\":\n \"\"\"Return an efficient mask of the overlap between this and other\"\"\"\n res = np.full(self.mask.shape, False)\n submask = self._get_overlap_submask(other)\n if len(submask) != 0:\n (t1, l1), (b1, r1) = self.get_tlbr()\n (t2, l2), (b2, r2) = other.get_tlbr()\n maxt, maxl = 
max(t1, t2), max(l1, l2)\n minb, minr = min(b1, b2), min(r1, r2)\n res[maxt:minb,maxl:minr] = submask\n return EfficientMask(res, (self.score + other.score)/2)\n\n def mostly_contained_in(self, out_mask: \"EfficientMask\", thresh: float = 0.95) -> bool:\n \"\"\"Returns True if thresh of self's pixels are in out_mask\"\"\"\n size_in = self.get_size() + 1\n overlap = mask_size(self._get_overlap_submask(out_mask))\n return overlap / size_in > thresh\n \n def overlaps_threshold(self, other: \"EfficientMask\", thresh: float = 0.50) -> bool:\n \"\"\"Returns true if over thresh of either mask is contained in the other\"\"\"\n size_1 = self.get_size() + 1\n size_2 = other.get_size() + 1\n overlap = mask_size(self._get_overlap_submask(other))\n return overlap / size_1 > thresh or overlap / size_2 > thresh\n \n def near_equivalent_to(self, other: \"EfficientMask\", thresh: float = 0.96) -> bool:\n \"\"\"Return true if these two masks have prop overlapping pixels > thresh\"\"\"\n size_1 = self.get_size() + 1\n size_2 = other.get_size() + 1\n if size_1 / size_2 < thresh or size_2 / size_1 < thresh:\n return False\n difference = mask_size(self._get_xor_submask(other))\n if (difference / size_1) > (1-thresh) or (difference / size_2) > (1-thresh):\n return False\n return True\n \n def union(self, other: \"EfficientMask\") -> \"EfficientMask\":\n \"\"\"Return a new efficient mask unioning these\"\"\"\n new_mask = self.mask * 1\n (t2, l2), (b2, r2) = other.get_tlbr()\n new_mask[t2:b2,l2:r2] += other.mask[t2:b2,l2:r2]*1\n return EfficientMask(\n mask=cast(np.ndarray, new_mask > 0),\n score=(self.score + other.score) / 2, # may be more appropriate as weighted mask sizes\n )\n\n def subtract(self, other: \"EfficientMask\") -> \"EfficientMask\":\n \"\"\"Subtract the other mask from this one\"\"\"\n new_mask = self.mask * 1\n (t2, l2), (b2, r2) = other.get_tlbr()\n new_mask[t2:b2,l2:r2] -= other.mask[t2:b2,l2:r2]*1\n return EfficientMask(\n mask=cast(np.ndarray, new_mask == 1),\n score=self.score,\n )" } ]
import time
import sys
import numpy as np
import os
import base64
import cv2
import json
from segment_anything import sam_model_registry
from segment_anything.automatic_mask_generator import SamAutomaticMaskGenerator
from .mask_creation_utils import get_groups_simple, refine_groups_simple, FinalGrouping, FinalGroup, get_points_from_canny_greedy
from .efficient_mask import EfficientMask
from PIL import Image
from io import BytesIO
from typing import TypedDict, List
4,285
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the CC-BY-NC license found in the # LICENSE file in the root directory of this source tree. LOW = 5000 # Low value into the images array to start at HIGH = 12000 # High value in images array to go to SETEV_MODEL_ROOT = 'FILL_ME' # TODO fill in ANNOTATE_ROOT = os.path.dirname(os.path.dirname(__file__)) SOURCE_DIR = os.path.join(ANNOTATE_ROOT, "assets/images") OUT_DIR = os.path.join(ANNOTATE_ROOT, "assets/masks") class SAMResult(TypedDict): segmentation: np.ndarray # the mask itself bbox: List[float] #XYWH of the mask area: int # area of the mask predicted_iou: float # model predicted quality point_coords: List[List[float]] # coords of this point stability_score: float # model stability score crop_box: List[float] # image crop used to generate this mask, XYWH def fold_group_tree(g: FinalGrouping): def fold_group(subg: FinalGroup): outer_mask = subg['outer_mask'] mask_img = Image.fromarray(np.uint8(outer_mask.mask * 255)) # type: ignore mask_img = mask_img.convert('1') maskbuf = BytesIO() mask_img.save(maskbuf, format='png', bits=1, optimize=True) mask_bytes = maskbuf.getvalue() as_base64 = base64.b64encode(mask_bytes) as_str = as_base64.decode('utf-8') (t, l), (b, r) = subg['outer_mask'].get_tlbr() return { 'outer_mask': as_str, 'area': int(outer_mask.get_size()), 'bounds': ((int(t), int(l)), (int(b), int(r))), 'subgroups': { idx: fold_group(subsubg) for (idx, subsubg) in subg['subgroups'].items() } } return { idx: fold_group(subg) for (idx, subg) in g.items() } def group_outputs(outputs: List[SAMResult]) -> FinalGrouping: as_efficient_masks: List[EfficientMask] = [ EfficientMask( res['segmentation'], res['predicted_iou'] * (res['stability_score'] ** 2), size=res['area'], ) for res in outputs ] in_order = sorted(as_efficient_masks, key=lambda x: x.get_size(), reverse=True) return get_groups_simple(in_order) def main(): all_images = os.listdir(SOURCE_DIR) target_images = all_images[LOW:HIGH] sam_checkpoint = SETEV_MODEL_ROOT model_type = "vit_h" device = "cuda" sam = sam_model_registry[model_type](checkpoint=sam_checkpoint) sam.to(device=device) generator = SamAutomaticMaskGenerator( sam, points_per_side = 50, points_per_batch = 64, pred_iou_thresh = 0.8, stability_score_thresh = 0.94, stability_score_offset = 1.0, box_nms_thresh = 0.97, min_mask_region_area = 1000, output_mode = "binary_mask", ) first_start = time.time() for idx, img in enumerate(target_images): try: start_time = time.time() path = os.path.join(SOURCE_DIR, img) img_array = cv2.imread(path) img_array = cv2.cvtColor(img_array, cv2.COLOR_BGR2RGB) canny_points = get_points_from_canny_greedy(img_array, distance_threshold=12, jitter_amount=35, num_extra=8) if len(canny_points) == 0: canny_results = [] print(f"[{time.time() - first_start}] No canny points for image {idx+LOW} : {time.time() - start_time}") else: points_for_sam = np.array([ [pt[1]/img_array.shape[1], pt[0]/img_array.shape[0]] for pt in canny_points ]) canny_generator = SamAutomaticMaskGenerator( sam, points_per_side=None, point_grids=points_for_sam, points_per_batch = 64, pred_iou_thresh = 0.8, stability_score_thresh = 0.94, stability_score_offset = 1.0, box_nms_thresh = 0.97, min_mask_region_area = 1000, output_mode = "binary_mask", ) canny_results = canny_generator.generate(img_array) print(f"[{time.time() - first_start}] SA canny compute time for image {idx+LOW} : {time.time() - start_time}") result = generator.generate(img_array) print(f"[{time.time() - 
first_start}] SA compute time for image {idx+LOW} : {time.time() - start_time}") result += canny_results grouped = group_outputs(result)
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the CC-BY-NC license found in the # LICENSE file in the root directory of this source tree. LOW = 5000 # Low value into the images array to start at HIGH = 12000 # High value in images array to go to SETEV_MODEL_ROOT = 'FILL_ME' # TODO fill in ANNOTATE_ROOT = os.path.dirname(os.path.dirname(__file__)) SOURCE_DIR = os.path.join(ANNOTATE_ROOT, "assets/images") OUT_DIR = os.path.join(ANNOTATE_ROOT, "assets/masks") class SAMResult(TypedDict): segmentation: np.ndarray # the mask itself bbox: List[float] #XYWH of the mask area: int # area of the mask predicted_iou: float # model predicted quality point_coords: List[List[float]] # coords of this point stability_score: float # model stability score crop_box: List[float] # image crop used to generate this mask, XYWH def fold_group_tree(g: FinalGrouping): def fold_group(subg: FinalGroup): outer_mask = subg['outer_mask'] mask_img = Image.fromarray(np.uint8(outer_mask.mask * 255)) # type: ignore mask_img = mask_img.convert('1') maskbuf = BytesIO() mask_img.save(maskbuf, format='png', bits=1, optimize=True) mask_bytes = maskbuf.getvalue() as_base64 = base64.b64encode(mask_bytes) as_str = as_base64.decode('utf-8') (t, l), (b, r) = subg['outer_mask'].get_tlbr() return { 'outer_mask': as_str, 'area': int(outer_mask.get_size()), 'bounds': ((int(t), int(l)), (int(b), int(r))), 'subgroups': { idx: fold_group(subsubg) for (idx, subsubg) in subg['subgroups'].items() } } return { idx: fold_group(subg) for (idx, subg) in g.items() } def group_outputs(outputs: List[SAMResult]) -> FinalGrouping: as_efficient_masks: List[EfficientMask] = [ EfficientMask( res['segmentation'], res['predicted_iou'] * (res['stability_score'] ** 2), size=res['area'], ) for res in outputs ] in_order = sorted(as_efficient_masks, key=lambda x: x.get_size(), reverse=True) return get_groups_simple(in_order) def main(): all_images = os.listdir(SOURCE_DIR) target_images = all_images[LOW:HIGH] sam_checkpoint = SETEV_MODEL_ROOT model_type = "vit_h" device = "cuda" sam = sam_model_registry[model_type](checkpoint=sam_checkpoint) sam.to(device=device) generator = SamAutomaticMaskGenerator( sam, points_per_side = 50, points_per_batch = 64, pred_iou_thresh = 0.8, stability_score_thresh = 0.94, stability_score_offset = 1.0, box_nms_thresh = 0.97, min_mask_region_area = 1000, output_mode = "binary_mask", ) first_start = time.time() for idx, img in enumerate(target_images): try: start_time = time.time() path = os.path.join(SOURCE_DIR, img) img_array = cv2.imread(path) img_array = cv2.cvtColor(img_array, cv2.COLOR_BGR2RGB) canny_points = get_points_from_canny_greedy(img_array, distance_threshold=12, jitter_amount=35, num_extra=8) if len(canny_points) == 0: canny_results = [] print(f"[{time.time() - first_start}] No canny points for image {idx+LOW} : {time.time() - start_time}") else: points_for_sam = np.array([ [pt[1]/img_array.shape[1], pt[0]/img_array.shape[0]] for pt in canny_points ]) canny_generator = SamAutomaticMaskGenerator( sam, points_per_side=None, point_grids=points_for_sam, points_per_batch = 64, pred_iou_thresh = 0.8, stability_score_thresh = 0.94, stability_score_offset = 1.0, box_nms_thresh = 0.97, min_mask_region_area = 1000, output_mode = "binary_mask", ) canny_results = canny_generator.generate(img_array) print(f"[{time.time() - first_start}] SA canny compute time for image {idx+LOW} : {time.time() - start_time}") result = generator.generate(img_array) print(f"[{time.time() - 
first_start}] SA compute time for image {idx+LOW} : {time.time() - start_time}") result += canny_results grouped = group_outputs(result)
refined = refine_groups_simple(grouped)
0
2023-12-13 16:16:48+00:00
8k
daswer123/xtts-webui
scripts/resemble_enhance/enhancer/enhancer.py
[ { "identifier": "Normalizer", "path": "scripts/resemble_enhance/common.py", "snippet": "class Normalizer(nn.Module):\n def __init__(self, momentum=0.01, eps=1e-9):\n super().__init__()\n self.momentum = momentum\n self.eps = eps\n self.running_mean_unsafe: Tensor\n self.running_var_unsafe: Tensor\n self.register_buffer(\"running_mean_unsafe\", torch.full([], torch.nan))\n self.register_buffer(\"running_var_unsafe\", torch.full([], torch.nan))\n\n @property\n def started(self):\n return not torch.isnan(self.running_mean_unsafe)\n\n @property\n def running_mean(self):\n if not self.started:\n return torch.zeros_like(self.running_mean_unsafe)\n return self.running_mean_unsafe\n\n @property\n def running_std(self):\n if not self.started:\n return torch.ones_like(self.running_var_unsafe)\n return (self.running_var_unsafe + self.eps).sqrt()\n\n @torch.no_grad()\n def _ema(self, a: Tensor, x: Tensor):\n return (1 - self.momentum) * a + self.momentum * x\n\n def update_(self, x):\n if not self.started:\n self.running_mean_unsafe = x.mean()\n self.running_var_unsafe = x.var()\n else:\n self.running_mean_unsafe = self._ema(self.running_mean_unsafe, x.mean())\n self.running_var_unsafe = self._ema(self.running_var_unsafe, (x - self.running_mean).pow(2).mean())\n\n def forward(self, x: Tensor, update=True):\n if self.training and update:\n self.update_(x)\n self.stats = dict(mean=self.running_mean.item(), std=self.running_std.item())\n x = (x - self.running_mean) / self.running_std\n return x\n\n def inverse(self, x: Tensor):\n return x * self.running_std + self.running_mean" }, { "identifier": "load_denoiser", "path": "scripts/resemble_enhance/denoiser/inference.py", "snippet": "@cache\ndef load_denoiser(run_dir, device):\n if run_dir is None:\n return Denoiser(HParams())\n hp = HParams.load(run_dir)\n denoiser = Denoiser(hp)\n path = run_dir / \"ds\" / \"G\" / \"default\" / \"mp_rank_00_model_states.pt\"\n state_dict = torch.load(path, map_location=\"cpu\")[\"module\"]\n denoiser.load_state_dict(state_dict)\n denoiser.eval()\n denoiser.to(device)\n return denoiser" }, { "identifier": "MelSpectrogram", "path": "scripts/resemble_enhance/melspec.py", "snippet": "class MelSpectrogram(nn.Module):\n def __init__(self, hp: HParams):\n \"\"\"\n Torch implementation of Resemble's mel extraction.\n Note that the values are NOT identical to librosa's implementation\n due to floating point precisions.\n \"\"\"\n super().__init__()\n self.hp = hp\n self.melspec = TorchMelSpectrogram(\n hp.wav_rate,\n n_fft=hp.n_fft,\n win_length=hp.win_size,\n hop_length=hp.hop_size,\n f_min=0,\n f_max=hp.wav_rate // 2,\n n_mels=hp.num_mels,\n power=1,\n normalized=False,\n # NOTE: Folowing librosa's default.\n pad_mode=\"constant\",\n norm=\"slaney\",\n mel_scale=\"slaney\",\n )\n self.register_buffer(\"stft_magnitude_min\", torch.FloatTensor([hp.stft_magnitude_min]))\n self.min_level_db = 20 * np.log10(hp.stft_magnitude_min)\n self.preemphasis = hp.preemphasis\n self.hop_size = hp.hop_size\n\n def forward(self, wav, pad=True):\n \"\"\"\n Args:\n wav: [B, T]\n \"\"\"\n device = wav.device\n if wav.is_mps:\n wav = wav.cpu()\n self.to(wav.device)\n if self.preemphasis > 0:\n wav = torch.nn.functional.pad(wav, [1, 0], value=0)\n wav = wav[..., 1:] - self.preemphasis * wav[..., :-1]\n mel = self.melspec(wav)\n mel = self._amp_to_db(mel)\n mel_normed = self._normalize(mel)\n assert not pad or mel_normed.shape[-1] == 1 + wav.shape[-1] // self.hop_size # Sanity check\n mel_normed = mel_normed.to(device)\n return mel_normed # (M, 
T)\n\n def _normalize(self, s, headroom_db=15):\n return (s - self.min_level_db) / (-self.min_level_db + headroom_db)\n\n def _amp_to_db(self, x):\n return x.clamp_min(self.hp.stft_magnitude_min).log10() * 20" }, { "identifier": "global_leader_only", "path": "scripts/resemble_enhance/utils/distributed.py", "snippet": "def get_free_port():\ndef fix_unset_envs():\ndef init_distributed():\ndef local_rank():\ndef global_rank():\ndef is_local_leader():\ndef is_global_leader():\ndef leader_only(leader_only_type, fn: Callable | None = None, boardcast_return=False) -> Callable:\n def wrapper(fn):\n def wrapped(*args, **kwargs):" }, { "identifier": "TrainLoop", "path": "scripts/resemble_enhance/utils/train_loop.py", "snippet": "class TrainLoop:\n _ = KW_ONLY\n\n run_dir: Path\n train_dl: DataLoader\n\n load_G: EngineLoader\n feed_G: GenFeeder\n load_D: EngineLoader | None = None\n feed_D: DisFeeder | None = None\n\n update_every: int = 5_000\n eval_every: int = 5_000\n backup_steps: tuple[int, ...] = (5_000, 100_000, 500_000)\n\n device: str = \"cuda\"\n eval_fn: EvalFn | None = None\n gan_training_start_step: int | None = None\n\n @property\n def global_step(self):\n return self.engine_G.global_step # How many steps have been completed?\n\n @property\n def eval_dir(self) -> Path | None:\n if self.eval_every != 0:\n eval_dir = self.run_dir.joinpath(\"eval\")\n eval_dir.mkdir(exist_ok=True)\n else:\n eval_dir = None\n return eval_dir\n\n @property\n def viz_dir(self) -> Path:\n return Path(self.run_dir / \"viz\")\n\n def make_current_step_viz_path(self, name: str, suffix: str) -> Path:\n path = (self.viz_dir / name / f\"{self.global_step}\").with_suffix(suffix)\n path.parent.mkdir(exist_ok=True, parents=True)\n return path\n\n def __post_init__(self):\n engine_G = self.load_G(self.run_dir)\n if self.load_D is None:\n engine_D = None\n else:\n engine_D = self.load_D(self.run_dir)\n self.engine_G = engine_G\n self.engine_D = engine_D\n\n @property\n def model_G(self):\n return self.engine_G.module\n\n @property\n def model_D(self):\n if self.engine_D is None:\n return None\n return self.engine_D.module\n\n def save_checkpoint(self, tag=\"default\"):\n engine_G = self.engine_G\n engine_D = self.engine_D\n engine_G.save_checkpoint(tag=tag)\n if engine_D is not None:\n engine_D.save_checkpoint(tag=tag)\n\n def run(self, max_steps: int = -1):\n self.set_running_loop_(self)\n\n train_dl = self.train_dl\n update_every = self.update_every\n eval_every = self.eval_every\n device = self.device\n eval_fn = self.eval_fn\n\n engine_G = self.engine_G\n engine_D = self.engine_D\n eval_dir = self.eval_dir\n\n init_step = self.global_step\n\n logger.info(f\"\\nTraining from step {init_step} to step {max_steps}\")\n warmup_steps = {init_step + x for x in [50, 100, 500]}\n\n engine_G.train()\n\n if engine_D is not None:\n engine_D.train()\n\n gan_start_step = self.gan_training_start_step\n\n while True:\n loss_G = loss_D = 0\n\n for batch in train_dl:\n torch.cuda.synchronize()\n start_time = time.time()\n\n # What's the step after this batch?\n step = self.global_step + 1\n\n # Send data to the GPU\n batch = tree_map(lambda x: x.to(device) if isinstance(x, Tensor) else x, batch)\n\n stats = {\"step\": step}\n\n # Include step == 1 for sanity check\n gan_started = gan_start_step is not None and (step >= gan_start_step or step == 1)\n gan_started &= engine_D is not None\n\n # Generator step\n fake, losses = self.feed_G(engine=engine_G, batch=batch)\n\n # Train generator\n if gan_started:\n assert engine_D is not None\n 
assert self.feed_D is not None\n\n # Freeze the discriminator to let gradient go through fake\n engine_D.freeze_()\n losses |= self.feed_D(engine=engine_D, batch=None, fake=fake)\n\n loss_G = sum(losses.values())\n stats |= {f\"G/{k}\": v.item() for k, v in losses.items()}\n stats |= {f\"G/{k}\": v for k, v in engine_G.gather_attribute(\"stats\").items()}\n del losses\n\n assert isinstance(loss_G, Tensor)\n stats[\"G/loss\"] = loss_G.item()\n stats[\"G/lr\"] = engine_G.get_lr()[0]\n stats[\"G/grad_norm\"] = engine_G.get_grad_norm() or 0\n\n if loss_G.isnan().item():\n logger.error(\"Generator loss is NaN, skipping step\")\n continue\n\n engine_G.backward(loss_G)\n engine_G.step()\n\n # Discriminator step\n if gan_started:\n assert engine_D is not None\n assert self.feed_D is not None\n\n engine_D.unfreeze_()\n losses = self.feed_D(engine=engine_D, batch=batch, fake=fake.detach())\n del fake\n\n assert isinstance(losses, dict)\n loss_D = sum(losses.values())\n assert isinstance(loss_D, Tensor)\n\n stats |= {f\"D/{k}\": v.item() for k, v in losses.items()}\n stats |= {f\"D/{k}\": v for k, v in engine_D.gather_attribute(\"stats\").items()}\n del losses\n\n if loss_D.isnan().item():\n logger.error(\"Discriminator loss is NaN, skipping step\")\n continue\n\n engine_D.backward(loss_D)\n engine_D.step()\n\n stats[\"D/loss\"] = loss_D.item()\n stats[\"D/lr\"] = engine_D.get_lr()[0]\n stats[\"D/grad_norm\"] = engine_D.get_grad_norm() or 0\n\n torch.cuda.synchronize()\n stats[\"elapsed_time\"] = time.time() - start_time\n stats = tree_map(lambda x: float(f\"{x:.4g}\") if isinstance(x, float) else x, stats)\n logger.info(json.dumps(stats, indent=0))\n\n command = non_blocking_input()\n\n evaling = step % eval_every == 0 or step in warmup_steps or command.strip() == \"eval\"\n if eval_fn is not None and is_global_leader() and eval_dir is not None and evaling:\n engine_G.eval()\n eval_fn(engine_G, eval_dir=eval_dir)\n engine_G.train()\n\n if command.strip() == \"quit\":\n logger.info(\"Training paused\")\n self.save_checkpoint(\"default\")\n return\n\n if command.strip() == \"backup\" or step in self.backup_steps:\n logger.info(\"Backing up\")\n self.save_checkpoint(tag=f\"backup_{step:07d}\")\n\n if step % update_every == 0 or command.strip() == \"save\":\n self.save_checkpoint(tag=\"default\")\n\n if step == max_steps:\n logger.info(\"Training finished\")\n self.save_checkpoint(tag=\"default\")\n return\n\n @classmethod\n def set_running_loop_(cls, loop):\n assert isinstance(loop, cls), f\"Expected {cls}, got {type(loop)}\"\n cls._running_loop: cls = loop\n\n @classmethod\n def get_running_loop(cls) -> \"TrainLoop | None\":\n if hasattr(cls, \"_running_loop\"):\n assert isinstance(cls._running_loop, cls)\n return cls._running_loop\n return None\n\n @classmethod\n def get_running_loop_global_step(cls) -> int | None:\n if loop := cls.get_running_loop():\n return loop.global_step\n return None\n\n @classmethod\n def get_running_loop_viz_path(cls, name: str, suffix: str) -> Path | None:\n if loop := cls.get_running_loop():\n return loop.make_current_step_viz_path(name, suffix)\n return None" }, { "identifier": "HParams", "path": "scripts/resemble_enhance/enhancer/hparams.py", "snippet": "class HParams(HParamsBase):\n cfm_solver_method: str = \"midpoint\"\n cfm_solver_nfe: int = 64\n cfm_time_mapping_divisor: int = 4\n univnet_nc: int = 96\n\n lcfm_latent_dim: int = 64\n lcfm_training_mode: str = \"ae\"\n lcfm_z_scale: float = 5\n\n vocoder_extra_dim: int = 32\n\n gan_training_start_step: int | None = 
5_000\n enhancer_stage1_run_dir: Path | None = None\n\n denoiser_run_dir: Path | None = None" }, { "identifier": "IRMAE", "path": "scripts/resemble_enhance/enhancer/lcfm/irmae.py", "snippet": "class IRMAE(nn.Module):\n def __init__(\n self,\n input_dim,\n output_dim,\n latent_dim,\n hidden_dim=1024,\n num_irms=4,\n ):\n \"\"\"\n Args:\n input_dim: input dimension\n output_dim: output dimension\n latent_dim: latent dimension\n hidden_dim: hidden layer dimension\n num_irm_matrics: number of implicit rank minimization matrices\n norm: normalization layer\n \"\"\"\n self.input_dim = input_dim\n super().__init__()\n\n self.encoder = nn.Sequential(\n nn.Conv1d(input_dim, hidden_dim, 3, padding=\"same\"),\n *[ResBlock(hidden_dim) for _ in range(4)],\n # Try to obtain compact representation (https://proceedings.neurips.cc/paper/2020/file/a9078e8653368c9c291ae2f8b74012e7-Paper.pdf)\n *[nn.Conv1d(hidden_dim if i == 0 else latent_dim, latent_dim, 1, bias=False) for i in range(num_irms)],\n nn.Tanh(),\n )\n\n self.decoder = nn.Sequential(\n nn.Conv1d(latent_dim, hidden_dim, 3, padding=\"same\"),\n *[ResBlock(hidden_dim) for _ in range(4)],\n nn.Conv1d(hidden_dim, output_dim, 1),\n )\n\n self.head = nn.Sequential(\n nn.Conv1d(output_dim, hidden_dim, 3, padding=\"same\"),\n nn.GELU(),\n nn.Conv1d(hidden_dim, input_dim, 1),\n )\n\n self.estimator = Normalizer()\n\n def encode(self, x):\n \"\"\"\n Args:\n x: (b c t) tensor\n \"\"\"\n z = self.encoder(x) # (b c t)\n _ = self.estimator(z) # Estimate the glboal mean and std of z\n self.stats = {}\n self.stats[\"z_mean\"] = z.mean().item()\n self.stats[\"z_std\"] = z.std().item()\n self.stats[\"z_abs_68\"] = z.abs().quantile(0.6827).item()\n self.stats[\"z_abs_95\"] = z.abs().quantile(0.9545).item()\n self.stats[\"z_abs_99\"] = z.abs().quantile(0.9973).item()\n return z\n\n def decode(self, z):\n \"\"\"\n Args:\n z: (b c t) tensor\n \"\"\"\n return self.decoder(z)\n\n def forward(self, x, skip_decoding=False):\n \"\"\"\n Args:\n x: (b c t) tensor\n skip_decoding: if True, skip the decoding step\n \"\"\"\n z = self.encode(x) # q(z|x)\n\n if skip_decoding:\n # This speeds up the training in cfm only mode\n decoded = None\n else:\n decoded = self.decode(z) # p(x|z)\n predicted = self.head(decoded)\n self.losses = dict(mse=F.mse_loss(predicted, x))\n\n return IRMAEOutput(latent=z, decoded=decoded)" }, { "identifier": "CFM", "path": "scripts/resemble_enhance/enhancer/lcfm/lcfm.py", "snippet": "CFM = \"cfm\"" }, { "identifier": "LCFM", "path": "scripts/resemble_enhance/enhancer/lcfm/lcfm.py", "snippet": "class LCFM(nn.Module):\n class Mode(Enum):\n AE = \"ae\"\n CFM = \"cfm\"\n\n def __init__(self, ae: IRMAE, cfm: CFM, z_scale: float = 1.0):\n super().__init__()\n self.ae = ae\n self.cfm = cfm\n self.z_scale = z_scale\n self._mode = None\n self._eval_tau = 0.5\n\n @property\n def mode(self):\n return self._mode\n\n def set_mode_(self, mode):\n mode = self.Mode(mode)\n self._mode = mode\n\n if mode == mode.AE:\n freeze_(self.cfm)\n logger.info(\"Freeze cfm\")\n elif mode == mode.CFM:\n freeze_(self.ae)\n logger.info(\"Freeze ae (encoder and decoder)\")\n else:\n raise ValueError(f\"Unknown training mode: {mode}\")\n\n def get_running_train_loop(self):\n try:\n # Lazy import\n from ...utils.train_loop import TrainLoop\n\n return TrainLoop.get_running_loop()\n except ImportError:\n return None\n\n @property\n def global_step(self):\n loop = self.get_running_train_loop()\n if loop is None:\n return None\n return loop.global_step\n\n @torch.no_grad()\n def 
_visualize(self, x, y, y_):\n loop = self.get_running_train_loop()\n if loop is None:\n return\n\n plt.subplot(221)\n plt.imshow(y[0].detach().cpu().numpy(), aspect=\"auto\", origin=\"lower\", interpolation=\"none\")\n plt.title(\"GT\")\n\n plt.subplot(222)\n y_ = y_[:, : y.shape[1]]\n plt.imshow(y_[0].detach().cpu().numpy(), aspect=\"auto\", origin=\"lower\", interpolation=\"none\")\n plt.title(\"Posterior\")\n\n plt.subplot(223)\n z_ = self.cfm(x)\n y__ = self.ae.decode(z_)\n y__ = y__[:, : y.shape[1]]\n plt.imshow(y__[0].detach().cpu().numpy(), aspect=\"auto\", origin=\"lower\", interpolation=\"none\")\n plt.title(\"C-Prior\")\n del y__\n\n plt.subplot(224)\n z_ = torch.randn_like(z_)\n y__ = self.ae.decode(z_)\n y__ = y__[:, : y.shape[1]]\n plt.imshow(y__[0].detach().cpu().numpy(), aspect=\"auto\", origin=\"lower\", interpolation=\"none\")\n plt.title(\"Prior\")\n del z_, y__\n\n path = loop.make_current_step_viz_path(\"recon\", \".png\")\n path.parent.mkdir(exist_ok=True, parents=True)\n plt.tight_layout()\n plt.savefig(path, dpi=500)\n plt.close()\n\n def _scale(self, z: Tensor):\n return z * self.z_scale\n\n def _unscale(self, z: Tensor):\n return z / self.z_scale\n\n def eval_tau_(self, tau):\n self._eval_tau = tau\n\n def forward(self, x, y: Tensor | None = None, ψ0: Tensor | None = None):\n \"\"\"\n Args:\n x: (b d t), condition mel\n y: (b d t), target mel\n ψ0: (b d t), starting mel\n \"\"\"\n if self.mode == self.Mode.CFM:\n self.ae.eval() # Always set to eval when training cfm\n\n if ψ0 is not None:\n ψ0 = self._scale(self.ae.encode(ψ0))\n if self.training:\n tau = torch.rand_like(ψ0[:, :1, :1])\n else:\n tau = self._eval_tau\n ψ0 = tau * torch.randn_like(ψ0) + (1 - tau) * ψ0\n\n if y is None:\n if self.mode == self.Mode.AE:\n with torch.no_grad():\n training = self.ae.training\n self.ae.eval()\n z = self.ae.encode(x)\n self.ae.train(training)\n else:\n z = self._unscale(self.cfm(x, ψ0=ψ0))\n\n h = self.ae.decode(z)\n else:\n ae_output: IRMAEOutput = self.ae(y, skip_decoding=self.mode == self.Mode.CFM)\n\n if self.mode == self.Mode.CFM:\n _ = self.cfm(x, self._scale(ae_output.latent.detach()), ψ0=ψ0)\n\n h = ae_output.decoded\n\n if h is not None and self.global_step is not None and self.global_step % 100 == 0:\n self._visualize(x[:1], y[:1], h[:1])\n\n return h" }, { "identifier": "UnivNet", "path": "scripts/resemble_enhance/enhancer/univnet/univnet.py", "snippet": "class UnivNet(nn.Module):\n @property\n def d_noise(self):\n return 128\n\n @property\n def strides(self):\n return [7, 5, 4, 3]\n\n @property\n def dilations(self):\n return [1, 3, 9, 27]\n\n @property\n def nc(self):\n return self.hp.univnet_nc\n\n @property\n def scale_factor(self) -> int:\n return self.hp.hop_size\n\n def __init__(self, hp: HParams, d_input):\n super().__init__()\n self.d_input = d_input\n\n self.hp = hp\n\n self.blocks = nn.ModuleList(\n [\n LVCBlock(\n self.nc,\n d_input,\n stride=stride,\n dilations=self.dilations,\n cond_hop_length=hop_length,\n kpnet_conv_size=3,\n )\n for stride, hop_length in zip(self.strides, np.cumprod(self.strides))\n ]\n )\n\n self.conv_pre = weight_norm(nn.Conv1d(self.d_noise, self.nc, 7, padding=3, padding_mode=\"reflect\"))\n\n self.conv_post = nn.Sequential(\n nn.LeakyReLU(0.2),\n weight_norm(nn.Conv1d(self.nc, 1, 7, padding=3, padding_mode=\"reflect\")),\n nn.Tanh(),\n )\n\n self.mrstft = MRSTFTLoss(hp)\n\n @property\n def eps(self):\n return 1e-5\n\n def forward(self, x: Tensor, y: Tensor | None = None, npad=10):\n \"\"\"\n Args:\n x: (b c t), acoustic 
features\n y: (b t), waveform\n Returns:\n z: (b t), waveform\n \"\"\"\n assert x.ndim == 3, \"x must be 3D tensor\"\n assert y is None or y.ndim == 2, \"y must be 2D tensor\"\n assert x.shape[1] == self.d_input, f\"x.shape[1] must be {self.d_input}, but got {x.shape}\"\n assert npad >= 0, \"npad must be positive or zero\"\n\n x = F.pad(x, (0, npad), \"constant\", 0)\n z = torch.randn(x.shape[0], self.d_noise, x.shape[2]).to(x)\n z = self.conv_pre(z) # (b c t)\n\n for block in self.blocks:\n z = block(z, x) # (b c t)\n\n z = self.conv_post(z) # (b 1 t)\n z = z[..., : -self.scale_factor * npad]\n z = z.squeeze(1) # (b t)\n\n if y is not None:\n self.losses = self.mrstft(z, y)\n\n return z" } ]
import logging
import matplotlib.pyplot as plt
import pandas as pd
import torch
from torch import Tensor, nn
from torch.distributions import Beta
from ..common import Normalizer
from ..denoiser.inference import load_denoiser
from ..melspec import MelSpectrogram
from ..utils.distributed import global_leader_only
from ..utils.train_loop import TrainLoop
from .hparams import HParams
from .lcfm import CFM, IRMAE, LCFM
from .univnet import UnivNet
7,099
logger = logging.getLogger(__name__) def _maybe(fn): def _fn(*args): if args[0] is None: return None return fn(*args) return _fn def _normalize_wav(x: Tensor): return x / (x.abs().max(dim=-1, keepdim=True).values + 1e-7) class Enhancer(nn.Module): def __init__(self, hp: HParams): super().__init__() self.hp = hp n_mels = self.hp.num_mels vocoder_input_dim = n_mels + self.hp.vocoder_extra_dim latent_dim = self.hp.lcfm_latent_dim self.lcfm = LCFM( IRMAE( input_dim=n_mels, output_dim=vocoder_input_dim, latent_dim=latent_dim, ), CFM( cond_dim=n_mels, output_dim=self.hp.lcfm_latent_dim, solver_nfe=self.hp.cfm_solver_nfe, solver_method=self.hp.cfm_solver_method, time_mapping_divisor=self.hp.cfm_time_mapping_divisor, ), z_scale=self.hp.lcfm_z_scale, ) self.lcfm.set_mode_(self.hp.lcfm_training_mode) self.mel_fn = MelSpectrogram(hp) self.vocoder = UnivNet(self.hp, vocoder_input_dim) self.denoiser = load_denoiser(self.hp.denoiser_run_dir, "cpu") self.normalizer = Normalizer() self._eval_lambd = 0.0 self.dummy: Tensor self.register_buffer("dummy", torch.zeros(1)) if self.hp.enhancer_stage1_run_dir is not None: pretrained_path = self.hp.enhancer_stage1_run_dir / "ds/G/default/mp_rank_00_model_states.pt" self._load_pretrained(pretrained_path) logger.info(f"{self.__class__.__name__} summary") logger.info(f"{self.summarize()}") def _load_pretrained(self, path): # Clone is necessary as otherwise it holds a reference to the original model cfm_state_dict = {k: v.clone() for k, v in self.lcfm.cfm.state_dict().items()} denoiser_state_dict = {k: v.clone() for k, v in self.denoiser.state_dict().items()} state_dict = torch.load(path, map_location="cpu")["module"] self.load_state_dict(state_dict, strict=False) self.lcfm.cfm.load_state_dict(cfm_state_dict) # Reset cfm self.denoiser.load_state_dict(denoiser_state_dict) # Reset denoiser logger.info(f"Loaded pretrained model from {path}") def summarize(self): npa_train = lambda m: sum(p.numel() for p in m.parameters() if p.requires_grad) npa = lambda m: sum(p.numel() for p in m.parameters()) rows = [] for name, module in self.named_children(): rows.append(dict(name=name, trainable=npa_train(module), total=npa(module))) rows.append(dict(name="total", trainable=npa_train(self), total=npa(self))) df = pd.DataFrame(rows) return df.to_markdown(index=False) def to_mel(self, x: Tensor, drop_last=True): """ Args: x: (b t), wavs Returns: o: (b c t), mels """ if drop_last: return self.mel_fn(x)[..., :-1] # (b d t) return self.mel_fn(x) @global_leader_only @torch.no_grad() def _visualize(self, original_mel, denoised_mel):
logger = logging.getLogger(__name__) def _maybe(fn): def _fn(*args): if args[0] is None: return None return fn(*args) return _fn def _normalize_wav(x: Tensor): return x / (x.abs().max(dim=-1, keepdim=True).values + 1e-7) class Enhancer(nn.Module): def __init__(self, hp: HParams): super().__init__() self.hp = hp n_mels = self.hp.num_mels vocoder_input_dim = n_mels + self.hp.vocoder_extra_dim latent_dim = self.hp.lcfm_latent_dim self.lcfm = LCFM( IRMAE( input_dim=n_mels, output_dim=vocoder_input_dim, latent_dim=latent_dim, ), CFM( cond_dim=n_mels, output_dim=self.hp.lcfm_latent_dim, solver_nfe=self.hp.cfm_solver_nfe, solver_method=self.hp.cfm_solver_method, time_mapping_divisor=self.hp.cfm_time_mapping_divisor, ), z_scale=self.hp.lcfm_z_scale, ) self.lcfm.set_mode_(self.hp.lcfm_training_mode) self.mel_fn = MelSpectrogram(hp) self.vocoder = UnivNet(self.hp, vocoder_input_dim) self.denoiser = load_denoiser(self.hp.denoiser_run_dir, "cpu") self.normalizer = Normalizer() self._eval_lambd = 0.0 self.dummy: Tensor self.register_buffer("dummy", torch.zeros(1)) if self.hp.enhancer_stage1_run_dir is not None: pretrained_path = self.hp.enhancer_stage1_run_dir / "ds/G/default/mp_rank_00_model_states.pt" self._load_pretrained(pretrained_path) logger.info(f"{self.__class__.__name__} summary") logger.info(f"{self.summarize()}") def _load_pretrained(self, path): # Clone is necessary as otherwise it holds a reference to the original model cfm_state_dict = {k: v.clone() for k, v in self.lcfm.cfm.state_dict().items()} denoiser_state_dict = {k: v.clone() for k, v in self.denoiser.state_dict().items()} state_dict = torch.load(path, map_location="cpu")["module"] self.load_state_dict(state_dict, strict=False) self.lcfm.cfm.load_state_dict(cfm_state_dict) # Reset cfm self.denoiser.load_state_dict(denoiser_state_dict) # Reset denoiser logger.info(f"Loaded pretrained model from {path}") def summarize(self): npa_train = lambda m: sum(p.numel() for p in m.parameters() if p.requires_grad) npa = lambda m: sum(p.numel() for p in m.parameters()) rows = [] for name, module in self.named_children(): rows.append(dict(name=name, trainable=npa_train(module), total=npa(module))) rows.append(dict(name="total", trainable=npa_train(self), total=npa(self))) df = pd.DataFrame(rows) return df.to_markdown(index=False) def to_mel(self, x: Tensor, drop_last=True): """ Args: x: (b t), wavs Returns: o: (b c t), mels """ if drop_last: return self.mel_fn(x)[..., :-1] # (b d t) return self.mel_fn(x) @global_leader_only @torch.no_grad() def _visualize(self, original_mel, denoised_mel):
loop = TrainLoop.get_running_loop()
4
2023-12-14 06:34:12+00:00
8k
FrozenBurning/PrimDiffusion
primdiffusion/model/attcond_smpl_model.py
[ { "identifier": "make_postex", "path": "dva/geom.py", "snippet": "def make_postex(v, idxim, barim):\n return (\n barim[None, :, :, 0, None] * v[:, idxim[:, :, 0]]\n + barim[None, :, :, 1, None] * v[:, idxim[:, :, 1]]\n + barim[None, :, :, 2, None] * v[:, idxim[:, :, 2]]\n ).permute(0, 3, 1, 2)" }, { "identifier": "compute_tbn", "path": "dva/geom.py", "snippet": "def compute_tbn(geom, vt, vi, vti):\n \"\"\"Computes tangent, bitangent, and normal vectors given a mesh.\n Args:\n geom: [N, n_verts, 3] th.Tensor\n Vertex positions.\n vt: [n_uv_coords, 2] th.Tensor\n UV coordinates.\n vi: [..., 3] th.Tensor\n Face vertex indices.\n vti: [..., 3] th.Tensor\n Face UV indices.\n Returns:\n [..., 3] th.Tensors for T, B, N.\n \"\"\"\n\n v0 = geom[:, vi[..., 0]]\n v1 = geom[:, vi[..., 1]]\n v2 = geom[:, vi[..., 2]]\n vt0 = vt[vti[..., 0]]\n vt1 = vt[vti[..., 1]]\n vt2 = vt[vti[..., 2]]\n\n v01 = v1 - v0\n v02 = v2 - v0\n vt01 = vt1 - vt0\n vt02 = vt2 - vt0\n f = 1.0 / (\n vt01[None, ..., 0] * vt02[None, ..., 1]\n - vt01[None, ..., 1] * vt02[None, ..., 0]\n )\n tangent = f[..., None] * th.stack(\n [\n v01[..., 0] * vt02[None, ..., 1] - v02[..., 0] * vt01[None, ..., 1],\n v01[..., 1] * vt02[None, ..., 1] - v02[..., 1] * vt01[None, ..., 1],\n v01[..., 2] * vt02[None, ..., 1] - v02[..., 2] * vt01[None, ..., 1],\n ],\n dim=-1,\n )\n tangent = F.normalize(tangent, dim=-1)\n normal = F.normalize(th.cross(v01, v02, dim=3), dim=-1)\n bitangent = F.normalize(th.cross(tangent, normal, dim=3), dim=-1)\n\n return tangent, bitangent, normal" }, { "identifier": "axisangle_to_matrix", "path": "dva/geom.py", "snippet": "def axisangle_to_matrix(rvec):\n theta = th.sqrt(1e-5 + th.sum(rvec**2, dim=-1))\n rvec = rvec / theta[..., None]\n costh = th.cos(theta)\n sinth = th.sin(theta)\n return th.stack(\n (\n th.stack(\n (\n rvec[..., 0] ** 2 + (1.0 - rvec[..., 0] ** 2) * costh,\n rvec[..., 0] * rvec[..., 1] * (1.0 - costh) - rvec[..., 2] * sinth,\n rvec[..., 0] * rvec[..., 2] * (1.0 - costh) + rvec[..., 1] * sinth,\n ),\n dim=-1,\n ),\n th.stack(\n (\n rvec[..., 0] * rvec[..., 1] * (1.0 - costh) + rvec[..., 2] * sinth,\n rvec[..., 1] ** 2 + (1.0 - rvec[..., 1] ** 2) * costh,\n rvec[..., 1] * rvec[..., 2] * (1.0 - costh) - rvec[..., 0] * sinth,\n ),\n dim=-1,\n ),\n th.stack(\n (\n rvec[..., 0] * rvec[..., 2] * (1.0 - costh) - rvec[..., 1] * sinth,\n rvec[..., 1] * rvec[..., 2] * (1.0 - costh) + rvec[..., 0] * sinth,\n rvec[..., 2] ** 2 + (1.0 - rvec[..., 2] ** 2) * costh,\n ),\n dim=-1,\n ),\n ),\n dim=-2,\n )" }, { "identifier": "project_points_multi", "path": "dva/geom.py", "snippet": "def project_points_multi(p, Rt, K, normalize=False, size=None):\n \"\"\"Project a set of 3D points into multiple cameras with a pinhole model.\n Args:\n p: [B, N, 3], input 3D points in world coordinates\n Rt: [B, NC, 3, 4], extrinsics (where NC is the number of cameras to project to)\n K: [B, NC, 3, 3], intrinsics\n normalize: bool, whether to normalize coordinates to [-1.0, 1.0]\n Returns:\n tuple:\n - [B, NC, N, 2] - projected points\n - [B, NC, N] - their\n \"\"\"\n B, N = p.shape[:2]\n NC = Rt.shape[1]\n\n Rt = Rt.reshape(B * NC, 3, 4)\n K = K.reshape(B * NC, 3, 3)\n\n # [B, N, 3] -> [B * NC, N, 3]\n p = p[:, np.newaxis].expand(-1, NC, -1, -1).reshape(B * NC, -1, 3)\n p_cam = p @ Rt[:, :3, :3].transpose(-2, -1) + Rt[:, :3, 3][:, np.newaxis]\n p_pix = p_cam @ K.transpose(-2, -1)\n p_depth = p_pix[:, :, 2:]\n p_pix = (p_pix[..., :2] / p_depth).reshape(B, NC, N, 2)\n p_depth = p_depth.reshape(B, NC, N)\n\n if normalize:\n assert size 
is not None\n h, w = size\n p_pix = (\n 2.0 * p_pix / th.as_tensor([w, h], dtype=th.float32, device=p.device) - 1.0\n )\n return p_pix, p_depth" }, { "identifier": "GeometryModule", "path": "dva/geom.py", "snippet": "class GeometryModule(nn.Module):\n def __init__(\n self,\n vi,\n vt,\n vti,\n v2uv,\n uv_size,\n flip_uv=False,\n impaint=False,\n impaint_threshold=100.0,\n ):\n super().__init__()\n\n self.register_buffer(\"vi\", th.as_tensor(vi))\n self.register_buffer(\"vt\", th.as_tensor(vt))\n self.register_buffer(\"vti\", th.as_tensor(vti))\n self.register_buffer(\"v2uv\", th.as_tensor(v2uv, dtype=th.int64))\n\n # TODO: should we just pass topology here?\n self.n_verts = v2uv.shape[0]\n\n self.uv_size = uv_size\n\n # TODO: can't we just index face_index?\n index_image = make_uv_vert_index(\n self.vt, self.vi, self.vti, uv_shape=uv_size, flip_uv=flip_uv\n ).cpu()\n face_index, bary_image = make_uv_barys(\n self.vt, self.vti, uv_shape=uv_size, flip_uv=flip_uv\n )\n if impaint:\n if uv_size >= 1024:\n logger.info(\n \"impainting index image might take a while for sizes >= 1024\"\n )\n\n index_image, bary_image = index_image_impaint(\n index_image, bary_image, impaint_threshold\n )\n # TODO: we can avoid doing this 2x\n face_index = index_image_impaint(\n face_index, distance_threshold=impaint_threshold\n )\n\n self.register_buffer(\"index_image\", index_image.cpu())\n self.register_buffer(\"bary_image\", bary_image.cpu())\n self.register_buffer(\"face_index_image\", face_index.cpu())\n\n def render_index_images(self, uv_size, flip_uv=False, impaint=False):\n index_image = make_uv_vert_index(\n self.vt, self.vi, self.vti, uv_shape=uv_size, flip_uv=flip_uv\n )\n face_image, bary_image = make_uv_barys(\n self.vt, self.vti, uv_shape=uv_size, flip_uv=flip_uv\n )\n\n if impaint:\n index_image, bary_image = index_image_impaint(\n index_image,\n bary_image,\n )\n\n return index_image, face_image, bary_image\n\n def vn(self, verts):\n return vert_normals(verts, self.vi[np.newaxis].to(th.long))\n\n def to_uv(self, values):\n return values_to_uv(values, self.index_image, self.bary_image)\n\n def from_uv(self, values_uv):\n # TODO: we need to sample this\n return sample_uv(values_uv, self.vt, self.v2uv.to(th.long))" }, { "identifier": "ConvBlock", "path": "dva/layers.py", "snippet": "class ConvBlock(nn.Module):\n def __init__(\n self,\n in_channels,\n out_channels,\n size,\n lrelu_slope=0.2,\n kernel_size=3,\n padding=1,\n wnorm_dim=0,\n ):\n super().__init__()\n\n self.conv_resize = Conv2dWN(in_channels, out_channels, kernel_size=1)\n self.conv1 = Conv2dWNUB(\n in_channels,\n in_channels,\n kernel_size=kernel_size,\n padding=padding,\n height=size,\n width=size,\n )\n\n self.lrelu1 = nn.LeakyReLU(lrelu_slope)\n self.conv2 = Conv2dWNUB(\n in_channels,\n out_channels,\n kernel_size=kernel_size,\n padding=padding,\n height=size,\n width=size,\n )\n self.lrelu2 = nn.LeakyReLU(lrelu_slope)\n\n def forward(self, x):\n x_skip = self.conv_resize(x)\n x = self.conv1(x)\n x = self.lrelu1(x)\n x = self.conv2(x)\n x = self.lrelu2(x)\n return x + x_skip" }, { "identifier": "tile2d", "path": "dva/layers.py", "snippet": "def tile2d(x, size: int):\n \"\"\"Tile a given set of features into a convolutional map.\n\n Args:\n x: float tensor of shape [N, F]\n size: int or a tuple\n\n Returns:\n a feature map [N, F, size[0], size[1]]\n \"\"\"\n # size = size if isinstance(size, tuple) else (size, size)\n # NOTE: expecting only int here (!!!)\n return x[:, :, np.newaxis, np.newaxis].expand(-1, -1, size, size)" }, { 
"identifier": "SpatialTransformer", "path": "primdiffusion/model/transformer.py", "snippet": "class SpatialTransformer(nn.Module):\n \"\"\"\n Transformer block for image-like data.\n First, project the input (aka embedding)\n and reshape to b, t, d.\n Then apply standard transformer action.\n Finally, reshape to image\n \"\"\"\n def __init__(self, in_channels, n_heads, d_head,\n depth=1, dropout=0., context_dim=None):\n super().__init__()\n self.in_channels = in_channels\n inner_dim = n_heads * d_head\n self.norm = Normalize(in_channels)\n\n self.proj_in = nn.Conv2d(in_channels,\n inner_dim,\n kernel_size=1,\n stride=1,\n padding=0)\n\n self.transformer_blocks = nn.ModuleList(\n [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim)\n for d in range(depth)]\n )\n\n self.proj_out = zero_module(nn.Conv2d(inner_dim,\n in_channels,\n kernel_size=1,\n stride=1,\n padding=0))\n\n def forward(self, x, context=None):\n # note: if no context is given, cross-attention defaults to self-attention\n b, c, h, w = x.shape\n x_in = x\n x = self.norm(x)\n x = self.proj_in(x)\n x = rearrange(x, 'b c h w -> b (h w) c').contiguous()\n for block in self.transformer_blocks:\n x = block(x, context=context)\n x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w).contiguous()\n x = self.proj_out(x)\n return x + x_in" } ]
import torch as th
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import dva.layers as la
import logging
from dva.geom import (
    make_postex,
    compute_tbn,
    axisangle_to_matrix,
    project_points_multi,
    GeometryModule,
)
from dva.layers import ConvBlock, tile2d
from primdiffusion.model.transformer import SpatialTransformer
from pytorch3d.renderer.mesh.rasterizer import MeshRasterizer, RasterizationSettings
from pytorch3d.utils.camera_conversions import cameras_from_opencv_projection
from pytorch3d.structures import Meshes
from easymocap.smplmodel import SMPLlayer
4,036
logger = logging.getLogger(__name__) def init_primitives(slab_size, n_prims, lbs_fn, geo_fn, ref_frame, scale=15000.0): stride = slab_size // int(n_prims**0.5) device = geo_fn.vt.device _, face_index_imp, bary_index_imp = geo_fn.render_index_images( slab_size, impaint=True ) bary_index_imp = th.as_tensor(bary_index_imp, device=device) prim_bary_img = bary_index_imp[stride // 2 :: stride, stride // 2 :: stride] prim_vidx_img = geo_fn.vi[ face_index_imp[stride // 2 :: stride, stride // 2 :: stride] ] prim_vtidx_img = geo_fn.vti[ face_index_imp[stride // 2 :: stride, stride // 2 :: stride] ] # getting actual geometrical coordinates ref_frame = { "poses": th.as_tensor(ref_frame["poses"]), "shapes": th.as_tensor(ref_frame["shapes"]), "Rh": th.as_tensor(ref_frame["Rh"]), "Th": th.as_tensor(ref_frame["Th"]), } # convert to mm geom = lbs_fn(**ref_frame) * 1000.0 prim_pos_mesh = ( make_postex(geom, prim_vidx_img, prim_bary_img) .permute(0, 2, 3, 1) .reshape(n_prims, 3) ) distance = th.cdist(prim_pos_mesh, prim_pos_mesh) # get a small neigbhourhood around nbs_dists = th.topk(distance, k=24, largest=False).values[:, 1:].mean(dim=-1) nbs_dists = nbs_dists.clip(5.0, 50.0) prim_scale = scale * (1.0 / nbs_dists) return prim_vidx_img, prim_vtidx_img, prim_bary_img, prim_scale, geom class BodyDecoder(nn.Module): def __init__( self, assets, n_prims, prim_size, n_pose_dims, n_pose_enc_channels, n_embs_channels=64, prim_motion_enabled=False, prim_motion_start_train=100, prim_rt_enabled=True, n_init_channels=64, uv_size=512, smpl_gender="neutral", image_height=1024, image_width=1024, ): super().__init__() self.uv_size = uv_size self.lbs_fn = SMPLlayer( assets.smpl_path, model_type="smpl", gender=smpl_gender, ) # initializing primitives self.n_prims = n_prims self.n_prims_x = int(n_prims**0.5) self.n_prims_y = int(n_prims**0.5) self.prim_size = prim_size self.slab_size = int(n_prims**0.5 * prim_size) logger.info( f"slab_size={self.slab_size}, prim_size={self.prim_size}, n_prims={self.n_prims}" ) self.prim_motion_enabled = prim_motion_enabled self.prim_motion_start_train = prim_motion_start_train self.prim_rt_enabled = prim_rt_enabled logger.info("initializing geometry module...")
logger = logging.getLogger(__name__) def init_primitives(slab_size, n_prims, lbs_fn, geo_fn, ref_frame, scale=15000.0): stride = slab_size // int(n_prims**0.5) device = geo_fn.vt.device _, face_index_imp, bary_index_imp = geo_fn.render_index_images( slab_size, impaint=True ) bary_index_imp = th.as_tensor(bary_index_imp, device=device) prim_bary_img = bary_index_imp[stride // 2 :: stride, stride // 2 :: stride] prim_vidx_img = geo_fn.vi[ face_index_imp[stride // 2 :: stride, stride // 2 :: stride] ] prim_vtidx_img = geo_fn.vti[ face_index_imp[stride // 2 :: stride, stride // 2 :: stride] ] # getting actual geometrical coordinates ref_frame = { "poses": th.as_tensor(ref_frame["poses"]), "shapes": th.as_tensor(ref_frame["shapes"]), "Rh": th.as_tensor(ref_frame["Rh"]), "Th": th.as_tensor(ref_frame["Th"]), } # convert to mm geom = lbs_fn(**ref_frame) * 1000.0 prim_pos_mesh = ( make_postex(geom, prim_vidx_img, prim_bary_img) .permute(0, 2, 3, 1) .reshape(n_prims, 3) ) distance = th.cdist(prim_pos_mesh, prim_pos_mesh) # get a small neigbhourhood around nbs_dists = th.topk(distance, k=24, largest=False).values[:, 1:].mean(dim=-1) nbs_dists = nbs_dists.clip(5.0, 50.0) prim_scale = scale * (1.0 / nbs_dists) return prim_vidx_img, prim_vtidx_img, prim_bary_img, prim_scale, geom class BodyDecoder(nn.Module): def __init__( self, assets, n_prims, prim_size, n_pose_dims, n_pose_enc_channels, n_embs_channels=64, prim_motion_enabled=False, prim_motion_start_train=100, prim_rt_enabled=True, n_init_channels=64, uv_size=512, smpl_gender="neutral", image_height=1024, image_width=1024, ): super().__init__() self.uv_size = uv_size self.lbs_fn = SMPLlayer( assets.smpl_path, model_type="smpl", gender=smpl_gender, ) # initializing primitives self.n_prims = n_prims self.n_prims_x = int(n_prims**0.5) self.n_prims_y = int(n_prims**0.5) self.prim_size = prim_size self.slab_size = int(n_prims**0.5 * prim_size) logger.info( f"slab_size={self.slab_size}, prim_size={self.prim_size}, n_prims={self.n_prims}" ) self.prim_motion_enabled = prim_motion_enabled self.prim_motion_start_train = prim_motion_start_train self.prim_rt_enabled = prim_rt_enabled logger.info("initializing geometry module...")
self.geo_fn = GeometryModule(
4
2023-12-06 05:12:55+00:00
8k
LSimon95/megatts2
prepare_ds.py
[ { "identifier": "TextTokenizer", "path": "modules/tokenizer.py", "snippet": "class TextTokenizer:\n def __init__(self) -> None:\n\n self.separator = Separator(word=\"_\", syllable=\"-\", phone=\"|\")\n self.pinyin2lty = get_pinyin2lty()\n\n def phonemize(self, text: str) -> str:\n text = re.sub(r'[^\\w\\s]+', ' ', text) # remove punctuation\n text = re.sub(r'[ ]+', ' ', text) # remove extra spaces\n text = text.lower()\n\n phonemizeds = []\n for text_eng_chn in re.split(r\"[^\\w\\s']+\", text):\n # split chinese and english\n for text in re.split(r\"([a-z ]+)\", text_eng_chn):\n text = text.strip()\n if text == '' or text == \"'\":\n continue\n d = []\n if re.match(r\"[a-z ']+\", text):\n for word in re.split(r\"[ ]+\", text):\n phonemizeds.append(word)\n else:\n phones = []\n for n, py in enumerate(\n pinyin(\n text, style=Style.TONE3, neutral_tone_with_five=True\n )\n ):\n if not py[0][-1].isalnum():\n raise ValueError\n phones.append(py[0])\n phonemizeds.append(self.separator.phone.join(phones))\n\n phonemizeds = f'{self.separator.word}'.join(\n [phones for phones in phonemizeds])\n return phonemizeds\n\n def tokenize(self, text):\n phones = []\n for word in re.split('([_-])', self.phonemize(text.strip())):\n if len(word):\n for phone in re.split('\\|', word):\n if len(phone):\n phones.append(phone)\n\n return phones\n\n def tokenize_lty(self, pinyin_tokens):\n lty_tokens_list = []\n\n for token in pinyin_tokens:\n if token in self.pinyin2lty.keys():\n lty_tokens = self.pinyin2lty[token]\n phones = self.separator.syllable.join(lty_tokens)\n lty_tokens = re.split(rf'({self.separator.syllable})', phones)\n lty_tokens_list += lty_tokens\n else:\n lty_tokens_list.append(token)\n return lty_tokens_list" }, { "identifier": "read_textgrid", "path": "utils/textgrid.py", "snippet": "def read_textgrid(filename, fileEncoding=\"utf-8\"):\n \"\"\"\n Reads a TextGrid file into a dictionary object\n each dictionary has the following keys:\n \"start\"\n \"stop\"\n \"name\"\n \"tier\"\n\n Points and intervals use the same format, \n but the value for \"start\" and \"stop\" are the same\n\n Optionally, supply fileEncoding as argument. 
This defaults to \"utf-8\", tested with 'utf-16-be'.\n \"\"\"\n if isinstance(filename, str):\n with open(filename, \"r\", encoding=fileEncoding) as f:\n content = _read(f)\n elif hasattr(filename, \"readlines\"):\n content = _read(filename)\n else:\n raise TypeError(\"filename must be a string or a readable buffer\")\n\n interval_lines = [i for i, line in enumerate(content)\n if line.startswith(\"intervals [\")\n or line.startswith(\"points [\")]\n# tier_lines, tiers = [(i, line.split('\"')[-2]) \n# for i, line in enumerate(content)\n# if line.startswith(\"name =\")]\n tier_lines = []\n tiers = []\n for i, line in enumerate(content):\n if line.startswith(\"name =\"):\n tier_lines.append(i)\n tiers.append(line.split('\"')[-2]) \n\n interval_tiers = _find_tiers(interval_lines, tier_lines, tiers)\n assert len(interval_lines) == len(interval_tiers)\n return [_build_entry(i, content, t) for i, t in zip(interval_lines, interval_tiers)]" }, { "identifier": "HIFIGAN_SR", "path": "modules/tokenizer.py", "snippet": "HIFIGAN_SR = 16000" }, { "identifier": "HIFIGAN_HOP_LENGTH", "path": "modules/tokenizer.py", "snippet": "HIFIGAN_HOP_LENGTH = 256" }, { "identifier": "MelSpecExtractor", "path": "modules/tokenizer.py", "snippet": "class MelSpecExtractor(FeatureExtractor):\n name = \"mel_spec\"\n config_type = AudioFeatExtraConfig\n\n @property\n def frame_shift(self) -> Seconds:\n return self.config.frame_shift\n\n def feature_dim(self, sampling_rate: int) -> int:\n return self.config.feature_dim\n\n def extract(self, samples: Union[np.ndarray, torch.Tensor], sampling_rate: int) -> np.ndarray:\n assert sampling_rate == HIFIGAN_SR\n if not isinstance(samples, torch.Tensor):\n samples = torch.from_numpy(samples)\n torch.set_num_threads(1)\n # Hifigan\n mel_spec = ta.transforms.MelSpectrogram(\n sample_rate=sampling_rate,\n n_fft=HIFIGAN_NFFT,\n win_length=HIFIGAN_WIN_LENGTH,\n hop_length=HIFIGAN_HOP_LENGTH,\n n_mels=self.config.feature_dim,\n f_min=0,\n f_max=HIFIGAN_MAX_FREQ,\n power=1,\n )(samples)\n duration = round(samples.shape[-1] / sampling_rate, ndigits=12)\n num_frames = compute_num_frames(\n duration=duration,\n frame_shift=self.frame_shift,\n sampling_rate=sampling_rate,\n )\n return mel_spec.squeeze(0).permute(1, 0)[:num_frames, :].numpy()" }, { "identifier": "AudioFeatExtraConfig", "path": "modules/tokenizer.py", "snippet": "class AudioFeatExtraConfig:\n frame_shift: Seconds = HIFIGAN_HOP_LENGTH / HIFIGAN_SR\n feature_dim: int = HIFIGAN_MEL_CHANNELS" }, { "identifier": "SymbolTable", "path": "utils/symbol_table.py", "snippet": "class SymbolTable(Generic[Symbol]):\n '''SymbolTable that maps symbol IDs, found on the FSA arcs to\n actual objects. These objects can be arbitrary Python objects\n that can serve as keys in a dictionary (i.e. 
they need to be\n hashable and immutable).\n\n The SymbolTable can only be read to/written from disk if the\n symbols are strings.\n '''\n _id2sym: Dict[int, Symbol] = field(default_factory=dict)\n '''Map an integer to a symbol.\n '''\n\n _sym2id: Dict[Symbol, int] = field(default_factory=dict)\n '''Map a symbol to an integer.\n '''\n\n _next_available_id: int = 1\n '''A helper internal field that helps adding new symbols\n to the table efficiently.\n '''\n\n eps: Symbol = '<eps>'\n '''Null symbol, always mapped to index 0.\n '''\n\n def __post_init__(self):\n for idx, sym in self._id2sym.items():\n assert self._sym2id[sym] == idx\n assert idx >= 0\n\n for sym, idx in self._sym2id.items():\n assert idx >= 0\n assert self._id2sym[idx] == sym\n\n if 0 not in self._id2sym:\n self._id2sym[0] = self.eps\n self._sym2id[self.eps] = 0\n else:\n assert self._id2sym[0] == self.eps\n assert self._sym2id[self.eps] == 0\n\n self._next_available_id = max(self._id2sym) + 1\n\n @staticmethod\n def from_str(s: str) -> 'SymbolTable':\n '''Build a symbol table from a string.\n\n The string consists of lines. Every line has two fields separated\n by space(s), tab(s) or both. The first field is the symbol and the\n second the integer id of the symbol.\n\n Args:\n s:\n The input string with the format described above.\n Returns:\n An instance of :class:`SymbolTable`.\n '''\n id2sym: Dict[int, str] = dict()\n sym2id: Dict[str, int] = dict()\n\n for line in s.split('\\n'):\n fields = line.split()\n if len(fields) == 0:\n continue # skip empty lines\n assert len(fields) == 2, \\\n f'Expect a line with 2 fields. Given: {len(fields)}'\n sym, idx = fields[0], int(fields[1])\n assert sym not in sym2id, f'Duplicated symbol {sym}'\n assert idx not in id2sym, f'Duplicated id {idx}'\n id2sym[idx] = sym\n sym2id[sym] = idx\n\n eps = id2sym.get(0, '<eps>')\n\n return SymbolTable(_id2sym=id2sym, _sym2id=sym2id, eps=eps)\n\n @staticmethod\n def from_file(filename: str) -> 'SymbolTable':\n '''Build a symbol table from file.\n\n Every line in the symbol table file has two fields separated by\n space(s), tab(s) or both. The following is an example file:\n\n .. code-block::\n\n <eps> 0\n a 1\n b 2\n c 3\n\n Args:\n filename:\n Name of the symbol table file. Its format is documented above.\n\n Returns:\n An instance of :class:`SymbolTable`.\n\n '''\n with open(filename, 'r', encoding='utf-8') as f:\n return SymbolTable.from_str(f.read().strip())\n\n def to_str(self) -> str:\n '''\n Returns:\n Return a string representation of this object. You can pass\n it to the method ``from_str`` to recreate an identical object.\n '''\n s = ''\n for idx, symbol in sorted(self._id2sym.items()):\n s += f'{symbol} {idx}\\n'\n return s\n\n def to_file(self, filename: str):\n '''Serialize the SymbolTable to a file.\n\n Every line in the symbol table file has two fields separated by\n space(s), tab(s) or both. The following is an example file:\n\n .. code-block::\n\n <eps> 0\n a 1\n b 2\n c 3\n\n Args:\n filename:\n Name of the symbol table file. 
Its format is documented above.\n '''\n with open(filename, 'w') as f:\n for idx, symbol in sorted(self._id2sym.items()):\n print(symbol, idx, file=f)\n\n def add(self, symbol: Symbol, index: Optional[int] = None) -> int:\n '''Add a new symbol to the SymbolTable.\n\n Args:\n symbol:\n The symbol to be added.\n index:\n Optional int id to which the symbol should be assigned.\n If it is not available, a ValueError will be raised.\n\n Returns:\n The int id to which the symbol has been assigned.\n '''\n # Already in the table? Return its ID.\n if symbol in self._sym2id:\n return self._sym2id[symbol]\n # Specific ID not provided - use next available.\n if index is None:\n index = self._next_available_id\n # Specific ID provided but not available.\n if index in self._id2sym:\n raise ValueError(f\"Cannot assign id '{index}' to '{symbol}' - \"\n f\"already occupied by {self._id2sym[index]}\")\n self._sym2id[symbol] = index\n self._id2sym[index] = symbol\n\n # Update next available ID if needed\n if self._next_available_id <= index:\n self._next_available_id = index + 1\n\n return index\n\n def get(self, k: Union[int, Symbol]) -> Union[Symbol, int]:\n '''Get a symbol for an id or get an id for a symbol\n\n Args:\n k:\n If it is an id, it tries to find the symbol corresponding\n to the id; if it is a symbol, it tries to find the id\n corresponding to the symbol.\n\n Returns:\n An id or a symbol depending on the given `k`.\n '''\n if isinstance(k, int):\n return self._id2sym[k]\n else:\n return self._sym2id[k]\n\n def merge(self, other: 'SymbolTable') -> 'SymbolTable':\n '''Create a union of two SymbolTables.\n Raises an AssertionError if the same IDs are occupied by\n different symbols.\n\n Args:\n other:\n A symbol table to merge with ``self``.\n\n Returns:\n A new symbol table.\n '''\n self._check_compatible(other)\n\n id2sym = {**self._id2sym, **other._id2sym}\n sym2id = {**self._sym2id, **other._sym2id}\n\n return SymbolTable(_id2sym=id2sym, _sym2id=sym2id, eps=self.eps)\n\n def _check_compatible(self, other: 'SymbolTable') -> None:\n # Epsilon compatibility\n assert self.eps == other.eps, f'Mismatched epsilon symbol: ' \\\n f'{self.eps} != {other.eps}'\n # IDs compatibility\n common_ids = set(self._id2sym).intersection(other._id2sym)\n for idx in common_ids:\n assert self[idx] == other[idx], f'ID conflict for id: {idx}, ' \\\n f'self[idx] = \"{self[idx]}\", ' \\\n f'other[idx] = \"{other[idx]}\"'\n # Symbols compatibility\n common_symbols = set(self._sym2id).intersection(other._sym2id)\n for sym in common_symbols:\n assert self[sym] == other[sym], f'ID conflict for id: {sym}, ' \\\n f'self[sym] = \"{self[sym]}\", ' \\\n f'other[sym] = \"{other[sym]}\"'\n\n def __getitem__(self, item: Union[int, Symbol]) -> Union[Symbol, int]:\n return self.get(item)\n\n def __contains__(self, item: Union[int, Symbol]) -> bool:\n if isinstance(item, int):\n return item in self._id2sym\n else:\n return item in self._sym2id\n\n def __len__(self) -> int:\n return len(self._id2sym)\n\n def __eq__(self, other: 'SymbolTable') -> bool:\n if len(self) != len(other):\n return False\n\n for s in self.symbols:\n if self[s] != other[s]:\n return False\n\n return True\n\n @property\n def ids(self) -> List[int]:\n '''Returns a list of integer IDs corresponding to the symbols.\n '''\n ans = list(self._id2sym.keys())\n ans.sort()\n return ans\n\n @property\n def symbols(self) -> List[Symbol]:\n '''Returns a list of symbols (e.g., strings) corresponding to\n the integer IDs.\n '''\n ans = list(self._sym2id.keys())\n ans.sort()\n 
return ans" } ]
import os
import glob
import argparse
import soundfile as sf
import librosa
from modules.tokenizer import TextTokenizer
from multiprocessing import Pool
from tqdm.auto import tqdm
from utils.textgrid import read_textgrid
from lhotse import validate_recordings_and_supervisions, CutSet, NumpyHdf5Writer, load_manifest_lazy
from lhotse.audio import Recording, RecordingSet
from lhotse.supervision import SupervisionSegment, SupervisionSet
from lhotse.recipes.utils import read_manifests_if_cached
from lhotse.utils import Seconds, compute_num_frames
from functools import partial
from modules.tokenizer import (
    HIFIGAN_SR,
    HIFIGAN_HOP_LENGTH,
    MelSpecExtractor,
    AudioFeatExtraConfig
)
from utils.symbol_table import SymbolTable
4,618
''' wavs dir ├── speaker1 │ ├── s1wav1.wav │ ├── s1wav1.txt │ ├── s1wav2.wav │ ├── s1wav2.txt │ ├── ... ├── speaker2 │ ├── s2wav1.wav │ ├── s2wav1.txt │ ├── ... cautions: stage 0 will delete all txt files in wavs dir ''' def make_lab(tt, wav): id = wav.split('/')[-1].split('.')[0] folder = '/'.join(wav.split('/')[:-1]) # Create lab files with open(f'{folder}/{id}.txt', 'r') as f: txt = f.read() with open(f'{folder}/{id}.lab', 'w') as f: f.write(' '.join(tt.tokenize(txt))) class DatasetMaker: def __init__(self): parser = argparse.ArgumentParser() parser.add_argument('--stage', type=int, default=0, help='Stage to start from') parser.add_argument('--wavtxt_path', type=str, default='data/wavs/', help='Path to wav and txt files') parser.add_argument('--text_grid_path', type=str, default='data/textgrids/', help='Path to textgrid files') parser.add_argument('--ds_path', type=str, default='data/ds/', help='Path to save dataset') parser.add_argument('--num_workers', type=int, default=4, help='Number of workers') parser.add_argument('--test_set_ratio', type=float, default=0.03, help='Test set ratio') parser.add_argument('--trim_wav', type=bool, default=False, help='Trim wav by textgrid') self.args = parser.parse_args() self.test_set_interval = int(1 / self.args.test_set_ratio) def make_labs(self): wavs = glob.glob(f'{self.args.wavtxt_path}/**/*.wav', recursive=True) tt = TextTokenizer() with Pool(self.args.num_workers) as p: for _ in tqdm(p.imap(partial(make_lab, tt), wavs), total=len(wavs)): pass def make_ds(self): tgs = glob.glob( f'{self.args.text_grid_path}/**/*.TextGrid', recursive=True) recordings = [[], []] # train, test supervisions = [[], []] set_name = ['train', 'valid'] max_duration_token = 0 for i, tg in tqdm(enumerate(tgs)): id = tg.split('/')[-1].split('.')[0] speaker = tg.split('/')[-2] intervals = [i for i in read_textgrid(tg) if (i[3] == 'phones')] y, sr = librosa.load( f'{self.args.wavtxt_path}/{speaker}/{id}.wav', sr=HIFIGAN_SR) if intervals[0][2] == '': intervals = intervals[1:] if intervals[-1][2] == '': intervals = intervals[:-1] if self.args.trim_wav: start = intervals[0][0]*sr stop = intervals[-1][1]*sr y = y[int(start):int(stop)] y = librosa.util.normalize(y) sf.write( f'{self.args.wavtxt_path}/{speaker}/{id}.wav', y, HIFIGAN_SR) start = intervals[0][0] stop = intervals[-1][1]
''' wavs dir ├── speaker1 │ ├── s1wav1.wav │ ├── s1wav1.txt │ ├── s1wav2.wav │ ├── s1wav2.txt │ ├── ... ├── speaker2 │ ├── s2wav1.wav │ ├── s2wav1.txt │ ├── ... cautions: stage 0 will delete all txt files in wavs dir ''' def make_lab(tt, wav): id = wav.split('/')[-1].split('.')[0] folder = '/'.join(wav.split('/')[:-1]) # Create lab files with open(f'{folder}/{id}.txt', 'r') as f: txt = f.read() with open(f'{folder}/{id}.lab', 'w') as f: f.write(' '.join(tt.tokenize(txt))) class DatasetMaker: def __init__(self): parser = argparse.ArgumentParser() parser.add_argument('--stage', type=int, default=0, help='Stage to start from') parser.add_argument('--wavtxt_path', type=str, default='data/wavs/', help='Path to wav and txt files') parser.add_argument('--text_grid_path', type=str, default='data/textgrids/', help='Path to textgrid files') parser.add_argument('--ds_path', type=str, default='data/ds/', help='Path to save dataset') parser.add_argument('--num_workers', type=int, default=4, help='Number of workers') parser.add_argument('--test_set_ratio', type=float, default=0.03, help='Test set ratio') parser.add_argument('--trim_wav', type=bool, default=False, help='Trim wav by textgrid') self.args = parser.parse_args() self.test_set_interval = int(1 / self.args.test_set_ratio) def make_labs(self): wavs = glob.glob(f'{self.args.wavtxt_path}/**/*.wav', recursive=True) tt = TextTokenizer() with Pool(self.args.num_workers) as p: for _ in tqdm(p.imap(partial(make_lab, tt), wavs), total=len(wavs)): pass def make_ds(self): tgs = glob.glob( f'{self.args.text_grid_path}/**/*.TextGrid', recursive=True) recordings = [[], []] # train, test supervisions = [[], []] set_name = ['train', 'valid'] max_duration_token = 0 for i, tg in tqdm(enumerate(tgs)): id = tg.split('/')[-1].split('.')[0] speaker = tg.split('/')[-2] intervals = [i for i in read_textgrid(tg) if (i[3] == 'phones')] y, sr = librosa.load( f'{self.args.wavtxt_path}/{speaker}/{id}.wav', sr=HIFIGAN_SR) if intervals[0][2] == '': intervals = intervals[1:] if intervals[-1][2] == '': intervals = intervals[:-1] if self.args.trim_wav: start = intervals[0][0]*sr stop = intervals[-1][1]*sr y = y[int(start):int(stop)] y = librosa.util.normalize(y) sf.write( f'{self.args.wavtxt_path}/{speaker}/{id}.wav', y, HIFIGAN_SR) start = intervals[0][0] stop = intervals[-1][1]
frame_shift=HIFIGAN_HOP_LENGTH / HIFIGAN_SR
3
2023-12-10 15:02:54+00:00
8k
ml-stat-Sustech/TorchCP
examples/imagenet_example.py
[ { "identifier": "ClassWisePredictor", "path": "torchcp/classification/predictors/classwise.py", "snippet": "class ClassWisePredictor(SplitPredictor):\n \"\"\"\n\n Applications of Class-Conditional Conformal Predictor in Multi-Class Classification (Shi et al., 2013)\n paper: https://ieeexplore.ieee.org/document/6784618\n \n \n :param score_function: non-conformity score function.\n :param model: a pytorch model.\n \"\"\"\n\n def __init__(self, score_function, model=None):\n super(ClassWisePredictor, self).__init__(score_function, model)\n self.q_hat = None\n\n def calculate_threshold(self, logits, labels, alpha):\n if alpha >= 1 or alpha <= 0:\n raise ValueError(\"Significance level 'alpha' must be in (0,1).\")\n logits = logits.to(self._device)\n labels = labels.to(self._device)\n # Count the number of classes\n num_classes = logits.shape[1]\n self.q_hat = torch.zeros(num_classes, device=self._device)\n for label in range(num_classes):\n x_cal_tmp = logits[labels == label]\n y_cal_tmp = labels[labels == label]\n scores = self.score_function(x_cal_tmp, y_cal_tmp)\n self.q_hat[label] = self._calculate_conformal_value(scores, alpha)" }, { "identifier": "ClusterPredictor", "path": "torchcp/classification/predictors/cluster.py", "snippet": "class ClusterPredictor(SplitPredictor):\n \"\"\"\n Class-Conditional Conformal Prediction with Many Classes (Ding et al., 2023).\n paper: https://arxiv.org/abs/2306.09335.\n \n :param score_function: a non-conformity score function.\n :param model: a pytorch model.\n :param ratio_clustering: the ratio of examples in the calibration dataset used to cluster classes.\n :param num_clusters: the number of clusters. If ratio_clustering is \"auto\", the number of clusters is automatically computed.\n :param split: the method to split the dataset into clustering dataset and calibration set. 
Options are 'proportional' (sample proportional to distribution such that rarest class has n_clustering example), 'doubledip' (don't split and use all data for both steps, or 'random' (each example is assigned to clustering step with some fixed probability).\n \"\"\"\n\n def __init__(self, score_function, model=None, ratio_clustering=\"auto\", num_clusters=\"auto\", split='random',\n temperature=1):\n\n super(ClusterPredictor, self).__init__(score_function, model, temperature)\n self.__ratio_clustering = ratio_clustering\n self.__num_clusters = num_clusters\n self.__split = split\n\n def calculate_threshold(self, logits, labels, alpha):\n if alpha >= 1 or alpha <= 0:\n raise ValueError(\"Significance level 'alpha' must be in (0,1).\")\n logits = logits.to(self._device)\n labels = labels.to(self._device)\n num_classes = logits.shape[1]\n scores = self.score_function(logits, labels)\n\n alpha = torch.tensor(alpha, device=self._device)\n classes_statistics = torch.tensor([torch.sum(labels == k).item() for k in range(num_classes)],\n device=self._device)\n\n # 1) Choose necessary parameters for Cluster algorithm\n if self.__ratio_clustering == 'auto' and self.__num_clusters == 'auto':\n n_min = torch.min(classes_statistics)\n n_thresh = self.__get_quantile_minimum(alpha)\n # Classes with fewer than n_thresh examples will be excluded from clustering\n n_min = torch.maximum(n_min, n_thresh)\n num_remaining_classes = torch.sum((classes_statistics >= n_min).float())\n\n # Compute the number of clusters and the minium number of examples for each class\n n_clustering = (n_min * num_remaining_classes / (75 + num_remaining_classes)).clone().to(\n torch.int32).to(self._device)\n self.__num_clusters = torch.floor(n_clustering / 2).to(torch.int32)\n self.__ratio_clustering = n_clustering / n_min\n\n # 2) Split data\n clustering_scores, clustering_labels, cal_scores, cal_labels = self.__split_data(scores,\n labels,\n classes_statistics)\n\n # 3) Filter \"rare\" classes\n rare_classes = self.__get_rare_classes(clustering_labels, alpha, num_classes)\n\n # 4) Run clustering\n if (num_classes - len(rare_classes) > self.__num_clusters) and (self.__num_clusters > 1):\n # Filter out rare classes and re-index\n remaining_idx, filtered_labels, class_remapping = self.__remap_classes(clustering_labels, rare_classes)\n filtered_scores = clustering_scores[remaining_idx]\n\n # Compute embedding for each class and get class counts\n embeddings, class_cts = self.__embed_all_classes(filtered_scores, filtered_labels)\n kmeans = KMeans(n_clusters=int(self.__num_clusters), n_init=10).fit(X=embeddings.detach().cpu().numpy(),\n sample_weight=np.sqrt(\n class_cts.detach().cpu().numpy()))\n nonrare_class_cluster_assignments = torch.tensor(kmeans.labels_, device=self._device)\n\n cluster_assignments = - torch.ones((num_classes,), dtype=torch.int32, device=self._device)\n\n for cls, remapped_cls in class_remapping.items():\n cluster_assignments[cls] = nonrare_class_cluster_assignments[remapped_cls]\n else:\n cluster_assignments = - torch.ones((num_classes,), dtype=torch.int32, device=self._device)\n\n # 5) Compute qhats for each cluster\n\n self.q_hat = self.__compute_cluster_specific_qhats(cluster_assignments,\n cal_scores,\n cal_labels,\n alpha)\n\n def __split_data(self, scores, labels, classes_statistics):\n if self.__split == 'proportional':\n # Split dataset along with fraction \"frac_clustering\"\n num_classes = classes_statistics.shape[0]\n n_k = torch.tensor([self.__ratio_clustering * classes_statistics[k] for k in 
range(num_classes)],\n device=self._device, dtype=torch.int32)\n idx1 = torch.zeros(labels.shape, dtype=torch.bool, device=self._device)\n for k in range(num_classes):\n # Randomly select n instances of class k\n idx = torch.argwhere(labels == k).flatten()\n random_indices = torch.randint(0, classes_statistics[k], (n_k[k],), device=self._device)\n selected_idx = idx[random_indices]\n idx1[selected_idx] = 1\n clustering_scores = scores[idx1]\n clustering_labels = labels[idx1]\n cal_scores = scores[~idx1]\n cal_labels = labels[~idx1]\n\n elif self.__split == 'doubledip':\n clustering_scores, clustering_labels = scores, labels\n cal_scores, cal_labels = scores, labels\n\n elif self.__split == 'random':\n # Each point is assigned to clustering set w.p. frac_clustering \n idx1 = torch.rand(size=(len(labels),), device=self._device) < self.__ratio_clustering\n\n clustering_scores = scores[idx1]\n clustering_labels = labels[idx1]\n cal_scores = scores[~idx1]\n cal_labels = labels[~idx1]\n else:\n raise Exception(\"Invalid split method. Options are 'proportional', 'doubledip', and 'random'\")\n return clustering_scores, clustering_labels, cal_scores, cal_labels\n\n def __get_quantile_minimum(self, alpha):\n \"\"\"\n Compute smallest n such that ceil((n+1)*(1-alpha)/n) <= 1\n \"\"\"\n n = torch.tensor(0, device=alpha.device)\n while torch.ceil((n + 1) * (1 - alpha) / n) > 1:\n n += 1\n return n\n\n def __get_rare_classes(self, labels, alpha, num_classes):\n \"\"\"\n Choose classes whose number is less than or equal to .\n \"\"\"\n thresh = self.__get_quantile_minimum(alpha)\n classes, cts = torch.unique(labels, return_counts=True)\n rare_classes = classes[cts < thresh].to(self._device)\n\n # Also included any classes that are so rare that we have 0 labels for it\n\n all_classes = torch.arange(num_classes, device=self._device)\n zero_ct_classes = all_classes[(all_classes.view(1, -1) != classes.view(-1, 1)).all(dim=0)]\n rare_classes = torch.concatenate((rare_classes, zero_ct_classes))\n\n return rare_classes\n\n def __remap_classes(self, labels, rare_classes):\n \"\"\"\n Exclude classes in rare_classes and remap remaining classes to be 0-indexed\n\n :returns:\n - remaining_idx: Boolean array the same length as labels. 
Entry i is True\n if labels[i] is not in rare_classes\n - remapped_labels : Array that only contains the entries of labels that are\n not in rare_classes (in order)\n - remapping : Dict mapping old class index to new class index\n\n \"\"\"\n labels = labels.detach().cpu().numpy()\n rare_classes = rare_classes.detach().cpu().numpy()\n remaining_idx = ~np.isin(labels, rare_classes)\n\n remaining_labels = labels[remaining_idx]\n remapped_labels = np.zeros(remaining_labels.shape, dtype=int)\n new_idx = 0\n remapping = {}\n for i in range(len(remaining_labels)):\n if remaining_labels[i] in remapping:\n remapped_labels[i] = remapping[remaining_labels[i]]\n else:\n remapped_labels[i] = new_idx\n remapping[remaining_labels[i]] = new_idx\n new_idx += 1\n\n return torch.from_numpy(remaining_idx).to(self._device), torch.tensor(remapped_labels,\n device=self._device), remapping\n\n def __embed_all_classes(self, scores_all, labels, q=[0.5, 0.6, 0.7, 0.8, 0.9]):\n \"\"\"\n :param scores_all: num_instances-length array where scores_all[i] = score of true class for instance i.\n :param labels: num_instances-length array of true class labels.\n :param q: quantiles to include in embedding.\n\n :returns:\n - embeddings: num_classes x len(q) array where ith row is the embeddings of class i.\n - cts: num_classes-length array where cts[i] = # of times class i appears in labels .\n \"\"\"\n num_classes = len(torch.unique(labels))\n embeddings = torch.zeros((num_classes, len(q)), device=self._device)\n cts = torch.zeros((num_classes,), device=self._device)\n\n for i in range(num_classes):\n if len(scores_all.shape) > 1:\n raise DimensionError(f\"Expected 1-dimension, but got {len(scores_all.shape)}-dimension.\")\n\n class_i_scores = scores_all[labels == i]\n\n cts[i] = class_i_scores.shape[0]\n # Computes the q-quantiles of samples and returns the vector of quantiles\n embeddings[i, :] = torch.quantile(class_i_scores, torch.tensor(q, device=self._device))\n\n return embeddings, cts\n\n def __compute_cluster_specific_qhats(self, cluster_assignments, cal_class_scores, cal_true_labels, alpha):\n '''\n Computes cluster-specific quantiles (one for each class) that will result in marginal coverage of (1-alpha)\n \n :param cluster_assignments: num_classes length array where entry i is the index of the cluster that class i belongs to. Rare classes can be assigned to cluster -1 and they will automatically be given as default_qhat. 
\n :param cal_class_scores: cal_class_scores[i] is the score for instance i.\n :param cal_true_labels: true class labels for instances\n :param alpha: Desired coverage level\n\n\n :return : num_classes length array where entry i is the quantile correspond to the cluster that class i belongs to.\n '''\n\n # Map true class labels to clusters\n cal_true_clusters = torch.tensor([cluster_assignments[label] for label in cal_true_labels], device=self._device)\n num_clusters = torch.max(cluster_assignments) + 1\n \n cluster_qhats = self.__compute_class_specific_qhats(cal_class_scores, cal_true_clusters, num_clusters, alpha)\n # Map cluster qhats back to classes\n num_classes = len(cluster_assignments)\n qhats_class = torch.tensor([cluster_qhats[cluster_assignments[k]] for k in range(num_classes)],\n device=self._device)\n\n return qhats_class\n\n def __compute_class_specific_qhats(self, cal_class_scores, cal_true_clusters, num_clusters, alpha):\n '''\n Computes class-specific quantiles (one for each class) that will result in marginal coverage of (1-alpha)\n \n :param cal_class_scores: num_instances-length array where cal_class_scores[i] is the score for instance i\n :param cal_true_clusters: num_instances-length array of true class labels. If class -1 appears, it will be assigned the null_qhat value. It is appended as an extra entry of the returned q_hats so that q_hats[-1] = null_qhat.\n :param num_clusters: the number of clusters.\n :param alpha: Desired coverage level.\n\n :return: the threshold of each class\n '''\n\n # Compute quantile q_hat that will result in marginal coverage of (1-alpha)\n null_qhat = self._calculate_conformal_value(cal_class_scores, alpha)\n\n q_hats = torch.zeros((num_clusters,), device=self._device) # q_hats[i] = quantile for class i\n for k in range(num_clusters):\n # Only select data for which k is true class\n idx = (cal_true_clusters == k)\n scores = cal_class_scores[idx]\n q_hats[k] = self._calculate_conformal_value(scores, alpha)\n if -1 in cal_true_clusters:\n q_hats = torch.concatenate((q_hats, torch.tensor([null_qhat], device=self._device)))\n\n return q_hats" }, { "identifier": "SplitPredictor", "path": "torchcp/classification/predictors/split.py", "snippet": "class SplitPredictor(BasePredictor):\n \"\"\"\n Split Conformal Prediction (Vovk et a., 2005).\n Book: https://link.springer.com/book/10.1007/978-3-031-06649-8.\n \n :param score_function: non-conformity score function.\n :param model: a pytorch model.\n :param temperature: the temperature of Temperature Scaling.\n \"\"\"\n def __init__(self, score_function, model=None, temperature=1):\n super().__init__(score_function, model, temperature)\n\n #############################\n # The calibration process\n ############################\n def calibrate(self, cal_dataloader, alpha):\n self._model.eval()\n logits_list = []\n labels_list = []\n with torch.no_grad():\n for examples in cal_dataloader:\n tmp_x, tmp_labels = examples[0].to(self._device), examples[1].to(self._device)\n tmp_logits = self._logits_transformation(self._model(tmp_x)).detach()\n logits_list.append(tmp_logits)\n labels_list.append(tmp_labels)\n logits = torch.cat(logits_list).float()\n labels = torch.cat(labels_list)\n self.calculate_threshold(logits, labels, alpha)\n\n def calculate_threshold(self, logits, labels, alpha):\n if alpha >= 1 or alpha <= 0:\n raise ValueError(\"Significance level 'alpha' must be in (0,1).\")\n logits = logits.to(self._device)\n labels = labels.to(self._device)\n scores = self.score_function(logits, labels)\n 
self.q_hat = self._calculate_conformal_value(scores, alpha)\n\n def _calculate_conformal_value(self, scores, alpha):\n \"\"\"\n Calculate the 1-alpha quantile of scores.\n \n :param scores: non-conformity scores.\n :param alpha: a significance level.\n \n :return: the threshold which is use to construct prediction sets.\n \"\"\"\n if len(scores) == 0:\n warnings.warn(\n \"The number of scores is 0, which is a invalid scores. To avoid program crash, the threshold is set as torch.inf.\")\n return torch.inf\n qunatile_value = math.ceil(scores.shape[0] + 1) * (1 - alpha) / scores.shape[0]\n\n if qunatile_value > 1:\n warnings.warn(\n \"The value of quantile exceeds 1. It should be a value in (0,1). To avoid program crash, the threshold is set as torch.inf.\")\n return torch.inf\n\n return torch.quantile(scores, qunatile_value).to(self._device)\n\n #############################\n # The prediction process\n ############################\n def predict(self, x_batch):\n \"\"\"\n The input of score function is softmax probability.\n\n :param x_batch: a batch of instances.\n \"\"\"\n self._model.eval()\n if self._model != None:\n x_batch = self._model(x_batch.to(self._device)).float()\n x_batch = self._logits_transformation(x_batch).detach()\n sets = self.predict_with_logits(x_batch)\n return sets\n\n def predict_with_logits(self, logits, q_hat=None):\n \"\"\"\n The input of score function is softmax probability.\n if q_hat is not given by the function 'self.calibrate', the construction progress of prediction set is a naive method.\n\n :param logits: model output before softmax.\n :param q_hat: the conformal threshold.\n\n :return: prediction sets\n \"\"\"\n scores = self.score_function(logits).to(self._device)\n if q_hat is None:\n S = self._generate_prediction_set(scores, self.q_hat)\n else:\n S = self._generate_prediction_set(scores, q_hat)\n return S\n\n #############################\n # The evaluation process\n ############################\n\n def evaluate(self, val_dataloader):\n prediction_sets = []\n labels_list = []\n with torch.no_grad():\n for examples in val_dataloader:\n tmp_x, tmp_label = examples[0].to(self._device), examples[1].to(self._device)\n prediction_sets_batch = self.predict(tmp_x)\n prediction_sets.extend(prediction_sets_batch)\n labels_list.append(tmp_label)\n val_labels = torch.cat(labels_list)\n\n res_dict = {\"Coverage_rate\": self._metric('coverage_rate')(prediction_sets, val_labels),\n \"Average_size\": self._metric('average_size')(prediction_sets, val_labels)}\n return res_dict" }, { "identifier": "APS", "path": "torchcp/classification/scores/aps.py", "snippet": "class APS(BaseScore):\n \"\"\"\n Adaptive Prediction Sets (Romano et al., 2020)\n paper :https://proceedings.neurips.cc/paper/2020/file/244edd7e85dc81602b7615cd705545f5-Paper.pdf\n \"\"\"\n\n def __call__(self, logits, label=None):\n assert len(logits.shape) <= 2, \"The dimension of logits must be less than 2.\"\n if len(logits.shape) == 1:\n logits = logits.unsqueeze(0)\n probs = torch.softmax(logits, dim=-1)\n if label is None:\n return self._calculate_all_label(probs)\n else:\n return self._calculate_single_label(probs, label)\n\n def _calculate_all_label(self, probs):\n indices, ordered, cumsum = self._sort_sum(probs)\n U = torch.rand(probs.shape, device=probs.device)\n ordered_scores = cumsum - ordered * U\n _, sorted_indices = torch.sort(indices, descending=False, dim=-1)\n scores = ordered_scores.gather(dim=-1, index=sorted_indices)\n return scores\n\n def _sort_sum(self, probs):\n # ordered: the 
ordered probabilities in descending order\n # indices: the rank of ordered probabilities in descending order\n # cumsum: the accumulation of sorted probabilities\n ordered, indices = torch.sort(probs, dim=-1, descending=True)\n cumsum = torch.cumsum(ordered, dim=-1)\n return indices, ordered, cumsum\n\n def _calculate_single_label(self, probs, label):\n indices, ordered, cumsum = self._sort_sum(probs)\n U = torch.rand(indices.shape[0], device=probs.device)\n idx = torch.where(indices == label.view(-1, 1))\n scores_first_rank = U * cumsum[idx]\n idx_minus_one = (idx[0], idx[1] - 1)\n scores_usual = U * ordered[idx] + cumsum[idx_minus_one]\n return torch.where(idx[1] == 0, scores_first_rank, scores_usual)" }, { "identifier": "RAPS", "path": "torchcp/classification/scores/raps.py", "snippet": "class RAPS(APS):\n \"\"\"\n Regularized Adaptive Prediction Sets (Angelopoulos et al., 2020)\n paper : https://arxiv.org/abs/2009.14193\n \n :param penalty: the weight of regularization. When penalty = 0, RAPS=APS.\n :param kreg: the rank of regularization which is an integer in [0,labels_num].\n \"\"\"\n\n def __init__(self, penalty, kreg=0):\n \n if penalty <= 0:\n raise ValueError(\"The parameter 'penalty' must be a positive value.\")\n if kreg < 0:\n raise ValueError(\"The parameter 'kreg' must be a nonnegative value.\")\n if type(kreg) != int:\n raise TypeError(\"The parameter 'kreg' must be a integer.\")\n super(RAPS, self).__init__()\n self.__penalty = penalty\n self.__kreg = kreg\n\n def _calculate_all_label(self, probs):\n indices, ordered, cumsum = self._sort_sum(probs)\n U = torch.rand(probs.shape, device=probs.device)\n reg = torch.maximum(self.__penalty * (torch.arange(1, probs.shape[-1] + 1, device=probs.device) - self.__kreg),\n torch.tensor(0, device=probs.device))\n ordered_scores = cumsum - ordered * U + reg\n _, sorted_indices = torch.sort(indices, descending=False, dim=-1)\n scores = ordered_scores.gather(dim=-1, index=sorted_indices)\n return scores\n \n def _calculate_single_label(self, probs, label):\n indices, ordered, cumsum = self._sort_sum(probs)\n U = torch.rand(indices.shape[0], device=probs.device)\n idx = torch.where(indices == label.view(-1, 1))\n reg = torch.maximum(self.__penalty * (idx[1] + 1 - self.__kreg), torch.tensor(0).to(probs.device))\n scores_first_rank = U * ordered[idx] + reg\n idx_minus_one = (idx[0], idx[1] - 1)\n scores_usual = U * ordered[idx] + cumsum[idx_minus_one] + reg\n return torch.where(idx[1] == 0, scores_first_rank, scores_usual)" }, { "identifier": "SAPS", "path": "torchcp/classification/scores/saps.py", "snippet": "class SAPS(APS):\n \"\"\"\n Sorted Adaptive Prediction Sets (Huang et al., 2023)\n paper: https://arxiv.org/abs/2310.06430\n \n :param weight: the weight of label ranking.\n \"\"\"\n\n def __init__(self, weight):\n\n super(SAPS, self).__init__()\n if weight <= 0:\n raise ValueError(\"The parameter 'weight' must be a positive value.\")\n self.__weight = weight\n\n def _calculate_all_label(self, probs):\n indices, ordered, cumsum = self._sort_sum(probs)\n ordered[:, 1:] = self.__weight\n cumsum = torch.cumsum(ordered, dim=-1)\n U = torch.rand(probs.shape, device=probs.device)\n ordered_scores = cumsum - ordered * U\n _, sorted_indices = torch.sort(indices, descending=False, dim=-1)\n scores = ordered_scores.gather(dim=-1, index=sorted_indices)\n return scores\n\n def _calculate_single_label(self, probs, label):\n indices, ordered, cumsum = self._sort_sum(probs)\n U = torch.rand(indices.shape[0], device=probs.device)\n idx = 
torch.where(indices == label.view(-1, 1))\n scores_first_rank = U * cumsum[idx]\n scores_usual = self.__weight * (idx[1] - U) + ordered[:, 0]\n return torch.where(idx[1] == 0, scores_first_rank, scores_usual)" }, { "identifier": "THR", "path": "torchcp/classification/scores/thr.py", "snippet": "class THR(BaseScore):\n \"\"\"\n Threshold conformal predictors (Sadinle et al., 2016).\n paper : https://arxiv.org/abs/1609.00451.\n \n :param score_type: a transformation on logits. Default: \"softmax\". Optional: \"softmax\", \"Identity\", \"log_softmax\" or \"log\".\n \"\"\"\n\n def __init__(self, score_type=\"softmax\") -> None:\n \n super().__init__()\n self.score_type = score_type\n if score_type == \"Identity\":\n self.transform = lambda x: x\n elif score_type == \"softmax\":\n self.transform = lambda x: torch.softmax(x, dim=- 1)\n elif score_type == \"log_softmax\":\n self.transform = lambda x: torch.log_softmax(x, dim=-1)\n elif score_type == \"log\":\n self.transform = lambda x: torch.log(x)\n else:\n raise NotImplementedError\n\n def __call__(self, logits, label=None):\n assert len(logits.shape) <= 2, \"The dimension of logits must be less than 2.\"\n if len(logits.shape) == 1:\n logits = logits.unsqueeze(0)\n temp_values = self.transform(logits)\n if label is None:\n return self.__calculate_all_label(temp_values)\n else:\n return self.__calculate_single_label(temp_values, label)\n\n def __calculate_single_label(self, temp_values, label):\n return 1 - temp_values[torch.arange(label.shape[0], device=temp_values.device), label]\n\n def __calculate_all_label(self, temp_values):\n return 1 - temp_values" }, { "identifier": "Metrics", "path": "torchcp/classification/utils/metrics.py", "snippet": "class Metrics:\n\n def __call__(self, metric) -> Any:\n if metric not in METRICS_REGISTRY_CLASSIFICATION.registered_names():\n raise NameError(f\"The metric: {metric} is not defined in TorchCP.\")\n return METRICS_REGISTRY_CLASSIFICATION.get(metric)" }, { "identifier": "fix_randomness", "path": "torchcp/utils/common.py", "snippet": "def fix_randomness(seed=0):\n \"\"\"\n Fix the random seed for python, torch, numpy.\n\n :param seed: the random seed\n \"\"\"\n np.random.seed(seed=seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n random.seed(seed)" }, { "identifier": "build_dataset", "path": "examples/common/dataset.py", "snippet": "def build_dataset(dataset_name, transform=None, mode='train'):\n # path of usr\n usr_dir = os.path.expanduser('~')\n data_dir = os.path.join(usr_dir, \"data\")\n\n if dataset_name == 'imagenet':\n if transform is None:\n transform = trn.Compose([\n trn.Resize(256),\n trn.CenterCrop(224),\n trn.ToTensor(),\n trn.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n\n dataset = dset.ImageFolder(data_dir + \"/imagenet/val\", transform)\n elif dataset_name == 'mnist':\n if transform is None:\n transform = trn.Compose([\n trn.ToTensor(),\n trn.Normalize((0.1307,), (0.3081,))\n ])\n if mode == \"train\":\n dataset = dset.MNIST(data_dir, train=True, download=True, transform=transform)\n elif mode == \"test\":\n dataset = dset.MNIST(data_dir, train=False, download=True, transform=transform)\n else:\n raise NotImplementedError\n\n return dataset" } ]
import argparse
import os
import torch
import torchvision
import torchvision.datasets as dset
import torchvision.transforms as trn
from tqdm import tqdm
from torchcp.classification.predictors import ClusterPredictor, ClassWisePredictor, SplitPredictor
from torchcp.classification.scores import THR, APS, SAPS, RAPS
from torchcp.classification import Metrics
from torchcp.utils import fix_randomness
from examples.common.dataset import build_dataset
7,046
# Copyright (c) 2023-present, SUSTech-ML.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='')
    parser.add_argument('--seed', default=0, type=int)
    parser.add_argument('--alpha', default=0.1, type=float)
    args = parser.parse_args()

    fix_randomness(seed=args.seed)

    #######################################
    # Loading ImageNet dataset and a pytorch model
    #######################################
    model_name = 'ResNet101'
    model = torchvision.models.resnet101(weights="IMAGENET1K_V1", progress=True)
    model_device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model.to(model_device)
# Copyright (c) 2023-present, SUSTech-ML.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='')
    parser.add_argument('--seed', default=0, type=int)
    parser.add_argument('--alpha', default=0.1, type=float)
    args = parser.parse_args()

    fix_randomness(seed=args.seed)

    #######################################
    # Loading ImageNet dataset and a pytorch model
    #######################################
    model_name = 'ResNet101'
    model = torchvision.models.resnet101(weights="IMAGENET1K_V1", progress=True)
    model_device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model.to(model_device)
dataset = build_dataset('imagenet')
9
2023-12-06 09:08:41+00:00
8k
vintagedave/Fontimize
tests.py
[ { "identifier": "get_used_characters_in_html", "path": "fontimize.py", "snippet": "def get_used_characters_in_html(html : str) -> set[chr]:\n soup = BeautifulSoup(html, 'html.parser')\n text = soup.get_text()\n return get_used_characters_in_str(text)" }, { "identifier": "charPair", "path": "fontimize.py", "snippet": "class charPair:\n def __init__(self, first : chr, second : chr):\n self.first = first\n self.second = second\n\n def __str__(self):\n return \"[\" + self.first + \"-\" + self.second + \"]\" # Pairs are inclusive\n \n # For print()-ing\n def __repr__(self):\n return self.__str__()\n \n def __eq__(self, other):\n if isinstance(other, charPair):\n return self.first == other.first and self.second == other.second\n return False\n \n def get_range(self):\n if self.first == self.second:\n return _get_unicode_string(self.first)\n else:\n return _get_unicode_string(self.first) + '-' + _get_unicode_string(self.second, False) # Eg \"U+0061-0071\"" }, { "identifier": "_get_char_ranges", "path": "fontimize.py", "snippet": "def _get_char_ranges(chars : list[chr]):\n chars.sort()\n if not chars:\n return []\n res : list[charPair] = []\n first : chr = chars[0]\n prev_seen : chr = first\n for c in chars[1:]:\n expected_next_char = chr(ord(prev_seen) + 1)\n if c != expected_next_char:\n # non-sequential, so time to start a new set\n pair = charPair(first, prev_seen)\n res.append(pair)\n first = c\n prev_seen = c\n # add final set if it hasn't been added yet\n if (not res) or (res[-1].second != prev_seen):\n pair = charPair(first, prev_seen)\n res.append(pair)\n\n return res" }, { "identifier": "optimise_fonts", "path": "fontimize.py", "snippet": "def optimise_fonts(text : str, fonts : list[str], fontpath : str = \"\", subsetname = \"FontimizeSubset\", verbose : bool = False, print_stats : bool = True) -> dict[str, typing.Any]:\n verbosity = 2 if verbose else 0 # ttf2web has 0, 1, 2, so match that to off and on\n\n res : dict[str, typing.Any] = {}\n res[\"css\"] = {} # at this level there are no CSS files, include just to prevent errors for API consumer\n\n characters = get_used_characters_in_str(text)\n\n char_list = list(characters)\n if verbosity >= 2:\n print(\"Characters:\")\n print(\" \" + str(char_list))\n res[\"chars\"] = characters # set of characters used in the input text\n\n char_ranges = _get_char_ranges(char_list)\n if verbosity >= 2:\n print(\"Character ranges:\")\n print(\" \" + str(char_ranges))\n \n uranges_str = ', '.join(r.get_range() for r in char_ranges)\n uranges = [[subsetname, uranges_str]] # subsetname here will be in the generated font, eg 'Arial.FontimizeSubset.woff2'\n if verbosity >= 2:\n print(\"Unicode ranges:\")\n print(\" \" + uranges_str) \n res[\"uranges\"] = uranges_str # list of unicode ranges matching the characters used in the input text\n\n # For each font, generate a new font file using only the used characters\n # By default, place it in the same folder as the respective font, unless fontpath is specified\n res[\"fonts\"] = {} # dict of old font path -> new font path\n for font in fonts:\n assetdir = fontpath if fontpath else path.dirname(font)\n t2w = TTF2Web(font, uranges, assetdir=assetdir)\n woff2_list = t2w.generateWoff2(verbosity=verbosity)\n # print(woff2_list)\n assert len(woff2_list) == 1 # We only expect one font file to be generated, per font input\n assert len(woff2_list[0]) == 2 # Pair of font, plus ranges -- we only care about [0], the font\n res[\"fonts\"][font] = woff2_list[0][0]\n\n if verbosity >= 2:\n print(\"Generated the following 
fonts from the originals:\")\n for k in res[\"fonts\"].keys():\n print(\" \" + k + \" ->\\n \" + res[\"fonts\"][k])\n\n if (verbosity >= 2) or print_stats:\n print(\"Results:\")\n print(\" Fonts processed: \" + str(len(res[\"fonts\"])))\n if (verbosity == 1): # If 2, printed above already\n print(\" Generated (use verbose output for input -> generated map):\")\n for k in res[\"fonts\"].keys():\n print(\" \" + res[\"fonts\"][k])\n sum_orig = _get_file_size_sum(list(res[\"fonts\"].keys()))\n sum_new = _get_file_size_sum(list(res[\"fonts\"].values())) \n print(\" Total original font size: \" + _file_size_to_readable(sum_orig))\n print(\" Total optimised font size: \" + _file_size_to_readable(sum_new))\n savings = sum_orig - sum_new;\n savings_percent = savings / sum_orig * 100 \n print(\" Savings: \" + _file_size_to_readable(savings) + \" less, which is \" + str(round(savings_percent, 1)) + \"%!\")\n print(\"Thankyou for using Fontimize!\") # A play on Font and Optimise, haha, so good pun clever. But seriously - hopefully a memorable name!\n\n return res" }, { "identifier": "optimise_fonts_for_files", "path": "fontimize.py", "snippet": "def optimise_fonts_for_files(files : list[str], font_output_dir = \"\", subsetname = \"FontimizeSubset\", verbose : bool = False, print_stats : bool = True, fonts : list[str] = [], addtl_text : str = \"\") -> dict[str, typing.Any]:\n if (len(files) == 0) and len(addtl_text) == 0: # If you specify any text, input files are optional -- note, not documented, used for cmd line app\n print(\"Error: No input files. Exiting.\")\n res = {\n \"css\" : [],\n \"fonts\" : [],\n \"chars\": set(),\n \"uranges\": []\n }\n \n text = addtl_text\n css_files : set[str] = set()\n font_files : set[str] = set()\n for f in fonts: # user-specified input font files\n font_files.add(f)\n\n for f in files:\n file_ext = pathlib.Path(f).suffix.lower()\n with open(f, 'r') as file:\n if file_ext == '.html' or file_ext == '.htm':\n html = file.read()\n soup = BeautifulSoup(html, 'html.parser')\n\n # Extract used text\n text += soup.get_text()\n\n # Extract CSS files the HTML references\n for link in soup.find_all('link', href=True):\n if 'css' in link['href']:\n css_ref = link['href']\n adjusted_css_path = _get_path(f, css_ref) # It'll be relative, so relative to the HTML file\n css_files.add(adjusted_css_path)\n else: # not HTML, treat as text\n text += file.read()\n\n # Sanity check that there is any text to process\n if len(text) == 0:\n print(\"Error: No text found in the input files or additional text. Exiting.\")\n res = {\n \"css\" : [],\n \"fonts\" : [],\n \"chars\": set(),\n \"uranges\": []\n }\n return res\n\n # Extract fonts from CSS files\n for css_file in css_files:\n with open(css_file, 'r') as file:\n css = file.read()\n\n # Extract the contents of all :before and :after CSS pseudo-elements; add these to the text\n pseudo_elements = _extract_pseudo_elements_content(css)\n for pe in pseudo_elements:\n text += pe\n\n # List of all fonts from @font-face src url: statements. 
This assumes they're all local files\n font_urls = _find_font_face_urls(css)\n for font_url in font_urls:\n # Only handle local files -- this does not support remote files\n adjusted_font_path = _get_path(adjusted_css_path, font_url) # Relative to the CSS file\n if path.isfile(adjusted_font_path):\n font_files.add(adjusted_font_path)\n else:\n # if verbose:\n print(\"Warning: Font file not found (may be remote not local?); skipping: \" + font_url + \" (resolved to \" + adjusted_font_path + \")\")\n\n if verbose:\n print(\"Found the following CSS files:\")\n for css_file in css_files:\n print(\" \" + css_file)\n\n print(\"Found the following fonts:\")\n for font_file in font_files:\n print(\" \" + font_file)\n\n # print(\"Found the following text:\")\n # print(text)\n \n if len(font_files) == 0:\n print(\"Error: No fonts found in the input files. Exiting.\")\n res = {\n \"css\" : css_files,\n \"fonts\" : [],\n \"chars\": set(),\n \"uranges\": []\n }\n return res\n\n res = optimise_fonts(text, font_files, fontpath=font_output_dir, subsetname=subsetname, verbose=verbose, print_stats=print_stats)\n res[\"css\"] = css_files\n return res;" } ]
import os
import unittest
import sys
from unittest.mock import patch
from fontimize import get_used_characters_in_html, charPair, _get_char_ranges, optimise_fonts, optimise_fonts_for_files
from fontTools.ttLib import woff2, TTFont
4,243
def test_html_with_links(self): self.assertEqual(get_used_characters_in_html('<html><body><a href="https://example.com">Hello, World!</a></body></html>'), set('Hello, World!')) def test_html_with_nested_tags(self): self.assertEqual(get_used_characters_in_html('<html><body><div><span>Hello, </span><a href="https://example.com">World!</a></span></div></body></html>'), set('Hello, World!')) class TestCharPairs(unittest.TestCase): def test_get_range_with_single_char(self): self.assertEqual(charPair('a', 'a').get_range(), 'U+0061') # Note that the second of the pair does not have the "U+" -- this caught me out # with parse errors inside TTF2Web() def test_get_range_with_two_chars(self): self.assertEqual(charPair('a', 'b').get_range(), 'U+0061-0062') def test_get_range_with_multiple_chars(self): self.assertEqual(charPair('a', 'd').get_range(), 'U+0061-0064') class TestCharRanges(unittest.TestCase): def test_empty(self): self.assertEqual(_get_char_ranges([]), []) def test_single_char(self): self.assertEqual(_get_char_ranges(['a']), [charPair('a', 'a')]) def test_two_sequential_chars(self): self.assertEqual(_get_char_ranges(['a', 'b']), [charPair('a', 'b')]) def test_two_nonsequential_chars(self): self.assertEqual(_get_char_ranges(['a', 'c']), [charPair('a', 'a'), charPair('c', 'c')]) def test_multiple_ranges(self): self.assertEqual(_get_char_ranges(['a', 'b', 'd', 'e', 'f', 'h']), [charPair('a', 'b'), charPair('d', 'f'), charPair('h', 'h')]) # Used to verify the number of glyphs in a font matches the number of (unique!) characters in the test string def _count_glyphs_in_font(fontpath): # with open(fontpath, 'rb') as f: # wfr = woff2.WOFF2Reader(f) # cmap = font['cmap'] # return len(cmap.getBestCmap()) # font.flavor = None # Decompress the font data font = TTFont(fontpath)#flavor='woff2')#, sfntReader=wfr) font.flavor = None # Decompress the font data num_glyphs = font['maxp'].numGlyphs # Use font.getGlyphOrder() and https://fontdrop.info to examine, if weird return num_glyphs # Does a named glyph exist in the font? 
def _font_contains(fontpath, charname : str) -> bool: font = TTFont(fontpath) font.flavor = None # Decompress the font data return charname in font.getGlyphOrder() class TestOptimiseFonts(unittest.TestCase): # Contains unique characters, none repeated, a couple of capitals, some symbols, and 26 lowercase test_string = " ,.@QT_abcdefghijklmnopqrstuvwxyz" def test_optimise_fonts_with_single_font(self): result = optimise_fonts(self.test_string, ['tests/Spirax-Regular.ttf'], fontpath='tests/output', verbose=False, print_stats=False) # Basics self.assertIsInstance(result, dict) foundfonts = result["fonts"] self.assertIn('tests/Spirax-Regular.ttf', foundfonts) # Generated with the right name self.assertEqual(foundfonts['tests/Spirax-Regular.ttf'], 'tests/output/Spirax-Regular.FontimizeSubset.woff2') # If the number of glyphs in the font matches the expected number # For +1, see test_optimise_fonts_with_empty_text self.assertEqual(len(self.test_string) + 1, _count_glyphs_in_font(foundfonts['tests/Spirax-Regular.ttf'])) def test_optimise_fonts_with_multiple_fonts(self): result = optimise_fonts(self.test_string, ['tests/Spirax-Regular.ttf', 'tests/EBGaramond-VariableFont_wght.ttf', 'tests/EBGaramond-Italic-VariableFont_wght.ttf'], fontpath='tests/output', verbose=False, print_stats=False) self.assertIsInstance(result, dict) foundfonts = result["fonts"] self.assertIn('tests/Spirax-Regular.ttf', foundfonts) self.assertEqual(foundfonts['tests/Spirax-Regular.ttf'], 'tests/output/Spirax-Regular.FontimizeSubset.woff2') self.assertIn('tests/EBGaramond-VariableFont_wght.ttf', foundfonts) self.assertEqual(foundfonts['tests/EBGaramond-VariableFont_wght.ttf'], 'tests/output/EBGaramond-VariableFont_wght.FontimizeSubset.woff2') self.assertIn('tests/EBGaramond-Italic-VariableFont_wght.ttf', foundfonts) self.assertEqual(foundfonts['tests/EBGaramond-Italic-VariableFont_wght.ttf'], 'tests/output/EBGaramond-Italic-VariableFont_wght.FontimizeSubset.woff2') # If the number of glyphs in the font matches the expected number # + 1 for the tests below -- see test_optimise_fonts_with_empty_text self.assertEqual(len(self.test_string) + 1, _count_glyphs_in_font('tests/output/Spirax-Regular.FontimizeSubset.woff2')) # + 16, + 12: EB Garamond contains multiple f-ligatures (eg fi), plus other variants, so the number of glyphs is higher. Italic has fewer. 
self.assertEqual(len(self.test_string) + 1 + 16, _count_glyphs_in_font('tests/output/EBGaramond-VariableFont_wght.FontimizeSubset.woff2')) self.assertEqual(len(self.test_string) + 1 + 12, _count_glyphs_in_font('tests/output/EBGaramond-Italic-VariableFont_wght.FontimizeSubset.woff2')) def test_optimise_fonts_with_empty_text(self): result = optimise_fonts("", ['tests/Spirax-Regular.ttf'], fontpath='tests/output', verbose=False, print_stats=False) self.assertIsInstance(result, dict) foundfonts = result["fonts"] self.assertIn('tests/Spirax-Regular.ttf', foundfonts) self.assertEqual(foundfonts['tests/Spirax-Regular.ttf'], 'tests/output/Spirax-Regular.FontimizeSubset.woff2') # If the number of glyphs in the font matches the expected number: two, because an empty string is reported as containing space, see get_used_characters_in_str # and fonts also seem to contain ".notdef": # > font.getGlyphOrder() # > ['.notdef', 'space'] self.assertEqual(2, _count_glyphs_in_font('tests/output/Spirax-Regular.FontimizeSubset.woff2')) class TestOptimiseFontsForFiles(unittest.TestCase): def setUp(self): self.files = ['tests/test1-index-css.html', 'tests/test.txt', 'tests/test2.html'] self.font_output_dir = 'tests/output' self.subsetname = 'TestFilesSubset' self.verbose = False self.print_stats = False # Not used by any HTML/CSS, mimics manually adding a font self.fonts = ['tests/Whisper-Regular.ttf', 'tests/NotoSans-VariableFont_wdth,wght.ttf', 'tests/NotoSansJP-VariableFont_wght.ttf'] @patch.object(sys, 'stdout') # provides mock_stdout in order to hide and verify console output def test_optimise_fonts_for_files(self, mock_stdout):
class TestGetUsedCharactersInHtml(unittest.TestCase): def test_empty_html(self): self.assertEqual(get_used_characters_in_html(''), set(' ')) def test_html_with_no_text(self): self.assertEqual(get_used_characters_in_html('<html><body></body></html>'), set(' ')) def test_html_with_text(self): self.assertEqual(get_used_characters_in_html('<html><body>Hello, World!</body></html>'), set('Hello, World!')) def test_html_with_repeated_text(self): self.assertEqual(get_used_characters_in_html('<html><body>Hello, World! Hello, World!</body></html>'), set('Hello, World!')) def test_html_with_multiple_spans(self): self.assertEqual(get_used_characters_in_html('<html><body><span>Hello</span><span>, </span><span>World!</span></body></html>'), set('Hello, World!')) def test_html_with_multiple_divs(self): self.assertEqual(get_used_characters_in_html('<html><body><div>Hello</div><div>, </div><div>World!</div></body></html>'), set('Hello, World!')) def test_html_with_links(self): self.assertEqual(get_used_characters_in_html('<html><body><a href="https://example.com">Hello, World!</a></body></html>'), set('Hello, World!')) def test_html_with_nested_tags(self): self.assertEqual(get_used_characters_in_html('<html><body><div><span>Hello, </span><a href="https://example.com">World!</a></span></div></body></html>'), set('Hello, World!')) class TestCharPairs(unittest.TestCase): def test_get_range_with_single_char(self): self.assertEqual(charPair('a', 'a').get_range(), 'U+0061') # Note that the second of the pair does not have the "U+" -- this caught me out # with parse errors inside TTF2Web() def test_get_range_with_two_chars(self): self.assertEqual(charPair('a', 'b').get_range(), 'U+0061-0062') def test_get_range_with_multiple_chars(self): self.assertEqual(charPair('a', 'd').get_range(), 'U+0061-0064') class TestCharRanges(unittest.TestCase): def test_empty(self): self.assertEqual(_get_char_ranges([]), []) def test_single_char(self): self.assertEqual(_get_char_ranges(['a']), [charPair('a', 'a')]) def test_two_sequential_chars(self): self.assertEqual(_get_char_ranges(['a', 'b']), [charPair('a', 'b')]) def test_two_nonsequential_chars(self): self.assertEqual(_get_char_ranges(['a', 'c']), [charPair('a', 'a'), charPair('c', 'c')]) def test_multiple_ranges(self): self.assertEqual(_get_char_ranges(['a', 'b', 'd', 'e', 'f', 'h']), [charPair('a', 'b'), charPair('d', 'f'), charPair('h', 'h')]) # Used to verify the number of glyphs in a font matches the number of (unique!) characters in the test string def _count_glyphs_in_font(fontpath): # with open(fontpath, 'rb') as f: # wfr = woff2.WOFF2Reader(f) # cmap = font['cmap'] # return len(cmap.getBestCmap()) # font.flavor = None # Decompress the font data font = TTFont(fontpath)#flavor='woff2')#, sfntReader=wfr) font.flavor = None # Decompress the font data num_glyphs = font['maxp'].numGlyphs # Use font.getGlyphOrder() and https://fontdrop.info to examine, if weird return num_glyphs # Does a named glyph exist in the font? 
def _font_contains(fontpath, charname : str) -> bool: font = TTFont(fontpath) font.flavor = None # Decompress the font data return charname in font.getGlyphOrder() class TestOptimiseFonts(unittest.TestCase): # Contains unique characters, none repeated, a couple of capitals, some symbols, and 26 lowercase test_string = " ,.@QT_abcdefghijklmnopqrstuvwxyz" def test_optimise_fonts_with_single_font(self): result = optimise_fonts(self.test_string, ['tests/Spirax-Regular.ttf'], fontpath='tests/output', verbose=False, print_stats=False) # Basics self.assertIsInstance(result, dict) foundfonts = result["fonts"] self.assertIn('tests/Spirax-Regular.ttf', foundfonts) # Generated with the right name self.assertEqual(foundfonts['tests/Spirax-Regular.ttf'], 'tests/output/Spirax-Regular.FontimizeSubset.woff2') # If the number of glyphs in the font matches the expected number # For +1, see test_optimise_fonts_with_empty_text self.assertEqual(len(self.test_string) + 1, _count_glyphs_in_font(foundfonts['tests/Spirax-Regular.ttf'])) def test_optimise_fonts_with_multiple_fonts(self): result = optimise_fonts(self.test_string, ['tests/Spirax-Regular.ttf', 'tests/EBGaramond-VariableFont_wght.ttf', 'tests/EBGaramond-Italic-VariableFont_wght.ttf'], fontpath='tests/output', verbose=False, print_stats=False) self.assertIsInstance(result, dict) foundfonts = result["fonts"] self.assertIn('tests/Spirax-Regular.ttf', foundfonts) self.assertEqual(foundfonts['tests/Spirax-Regular.ttf'], 'tests/output/Spirax-Regular.FontimizeSubset.woff2') self.assertIn('tests/EBGaramond-VariableFont_wght.ttf', foundfonts) self.assertEqual(foundfonts['tests/EBGaramond-VariableFont_wght.ttf'], 'tests/output/EBGaramond-VariableFont_wght.FontimizeSubset.woff2') self.assertIn('tests/EBGaramond-Italic-VariableFont_wght.ttf', foundfonts) self.assertEqual(foundfonts['tests/EBGaramond-Italic-VariableFont_wght.ttf'], 'tests/output/EBGaramond-Italic-VariableFont_wght.FontimizeSubset.woff2') # If the number of glyphs in the font matches the expected number # + 1 for the tests below -- see test_optimise_fonts_with_empty_text self.assertEqual(len(self.test_string) + 1, _count_glyphs_in_font('tests/output/Spirax-Regular.FontimizeSubset.woff2')) # + 16, + 12: EB Garamond contains multiple f-ligatures (eg fi), plus other variants, so the number of glyphs is higher. Italic has fewer. 
self.assertEqual(len(self.test_string) + 1 + 16, _count_glyphs_in_font('tests/output/EBGaramond-VariableFont_wght.FontimizeSubset.woff2')) self.assertEqual(len(self.test_string) + 1 + 12, _count_glyphs_in_font('tests/output/EBGaramond-Italic-VariableFont_wght.FontimizeSubset.woff2')) def test_optimise_fonts_with_empty_text(self): result = optimise_fonts("", ['tests/Spirax-Regular.ttf'], fontpath='tests/output', verbose=False, print_stats=False) self.assertIsInstance(result, dict) foundfonts = result["fonts"] self.assertIn('tests/Spirax-Regular.ttf', foundfonts) self.assertEqual(foundfonts['tests/Spirax-Regular.ttf'], 'tests/output/Spirax-Regular.FontimizeSubset.woff2') # If the number of glyphs in the font matches the expected number: two, because an empty string is reported as containing space, see get_used_characters_in_str # and fonts also seem to contain ".notdef": # > font.getGlyphOrder() # > ['.notdef', 'space'] self.assertEqual(2, _count_glyphs_in_font('tests/output/Spirax-Regular.FontimizeSubset.woff2')) class TestOptimiseFontsForFiles(unittest.TestCase): def setUp(self): self.files = ['tests/test1-index-css.html', 'tests/test.txt', 'tests/test2.html'] self.font_output_dir = 'tests/output' self.subsetname = 'TestFilesSubset' self.verbose = False self.print_stats = False # Not used by any HTML/CSS, mimics manually adding a font self.fonts = ['tests/Whisper-Regular.ttf', 'tests/NotoSans-VariableFont_wdth,wght.ttf', 'tests/NotoSansJP-VariableFont_wght.ttf'] @patch.object(sys, 'stdout') # provides mock_stdout in order to hide and verify console output def test_optimise_fonts_for_files(self, mock_stdout):
result = optimise_fonts_for_files(files=self.files, font_output_dir=self.font_output_dir, subsetname=self.subsetname, fonts=self.fonts,
4
2023-12-07 13:23:46+00:00
8k
wanghao-cst/Omni-VideoAssistant
llava/model/omni_arch.py
[ { "identifier": "build_vision_tower", "path": "llava/model/multimodal_encoder/builder.py", "snippet": "def build_vision_tower(vision_tower_cfg, **kwargs):\n vision_tower = getattr(vision_tower_cfg, 'mm_vision_tower', getattr(vision_tower_cfg, 'vision_tower', None))\n is_absolute_path_exists = os.path.exists(vision_tower)\n if is_absolute_path_exists or vision_tower.startswith(\"openai\") or vision_tower.startswith(\"laion\"):\n return CLIPVisionTower(vision_tower, args=vision_tower_cfg, **kwargs)\n\n raise ValueError(f'Unknown vision tower: {vision_tower}')" }, { "identifier": "build_vision_projector", "path": "llava/model/multimodal_projector/builder.py", "snippet": "def build_vision_projector(config, delay_load=False, **kwargs):\n projector_type = getattr(config, 'mm_projector_type', 'linear')\n\n if projector_type == 'linear':\n return nn.Linear(config.mm_hidden_size, config.hidden_size)\n\n mlp_gelu_match = re.match(r'^mlp(\\d+)x_gelu$', projector_type)\n if mlp_gelu_match:\n mlp_depth = int(mlp_gelu_match.group(1))\n modules = [nn.Linear(config.mm_hidden_size, config.hidden_size)]\n for _ in range(1, mlp_depth):\n modules.append(nn.GELU())\n modules.append(nn.Linear(config.hidden_size, config.hidden_size))\n # import pdb;pdb.set_trace()\n return nn.Sequential(*modules)\n\n if projector_type == 'identity':\n return IdentityMap()\n\n raise ValueError(f'Unknown projector type: {projector_type}')" }, { "identifier": "IGNORE_INDEX", "path": "llava/constants.py", "snippet": "IGNORE_INDEX = -100" }, { "identifier": "IMAGE_TOKEN_INDEX", "path": "llava/constants.py", "snippet": "IMAGE_TOKEN_INDEX = -200" }, { "identifier": "DEFAULT_IMAGE_PATCH_TOKEN", "path": "llava/constants.py", "snippet": "DEFAULT_IMAGE_PATCH_TOKEN = \"<im_patch>\"" }, { "identifier": "DEFAULT_IM_START_TOKEN", "path": "llava/constants.py", "snippet": "DEFAULT_IM_START_TOKEN = \"<im_start>\"" }, { "identifier": "DEFAULT_IM_END_TOKEN", "path": "llava/constants.py", "snippet": "DEFAULT_IM_END_TOKEN = \"<im_end>\"" } ]
from abc import ABC, abstractmethod
from .multimodal_encoder.builder import build_vision_tower
from .multimodal_projector.builder import build_vision_projector
from llava.constants import IGNORE_INDEX, IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_PATCH_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN
import torch
import torch.nn as nn
4,777
        new_input_embeds = []
        new_labels = [] if labels is not None else None
        cur_video_idx = 0
        for batch_idx, cur_input_ids in enumerate(input_ids):  # torch.Size([4, 375])
            if (cur_input_ids == IMAGE_TOKEN_INDEX).sum() == 0:  # 1 False
                # multimodal LLM, but the current sample is not multimodal
                # FIXME: this is a hacky fix, for deepspeed zero3 to work
                half_len = cur_input_ids.shape[0] // 2
                cur_frames_features = key_frames_feature[cur_video_idx]
                cur_input_embeds_1 = self.get_model().embed_tokens(cur_input_ids[:half_len])
                cur_input_embeds_2 = self.get_model().embed_tokens(cur_input_ids[half_len:])
                # import pdb;pdb.set_trace()
                # cur_input_embeds = torch.cat([cur_input_embeds_1, cur_frames_features[0:0], cur_input_embeds_2], dim=0)
                cur_input_embeds = torch.cat([cur_input_embeds_1, cur_frames_features[0], cur_input_embeds_2], dim=0)
                # cur_input_embeds = torch.cat([cur_input_embeds_1, cur_input_embeds_2], dim=0)
                new_input_embeds.append(cur_input_embeds)
                if labels is not None:
                    new_labels.append(labels[batch_idx])
                cur_video_idx += 1
                # import pdb;pdb.set_trace()  # never enter it
                continue
            image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]  # (tensor([35], device='cuda:0'),)
            cur_new_input_embeds = []
            if labels is not None:  # torch.Size([4, 375])
                cur_labels = labels[batch_idx]  # torch.Size([375]): -100...labels...-100
                cur_new_labels = []
                assert cur_labels.shape == cur_input_ids.shape
            while image_token_indices.numel() > 0:  # number of elements, 1 here
                # import pdb;pdb.set_trace()
                # if cur_video_idx > len(key_frames_feature)-1:
                #     cur_frames_features = key_frames_feature[-1]  # for gradio demo
                # else:
                cur_frames_features = key_frames_feature[cur_video_idx]  # torch.Size([4, 256, 4096])
                cur_frames_features = cur_frames_features.reshape(-1,4096)  # torch.Size([1024, 4096])
                image_token_start = image_token_indices[0]  # tensor(35, device='cuda:0')
                if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):  # False
                    cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start-1]).detach())
                    cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start-1:image_token_start]))
                    cur_new_input_embeds.append(cur_frames_features)
                    cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start+1:image_token_start+2]))
                    if labels is not None:
                        cur_new_labels.append(cur_labels[:image_token_start])
                        cur_new_labels.append(torch.full((cur_frames_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))
                        cur_new_labels.append(cur_labels[image_token_start:image_token_start+1])
                        cur_labels = cur_labels[image_token_start+2:]
                else:  # True
                    # import pdb;pdb.set_trace()
                    cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start]))  # embeddings of the instruction part: torch.Size([35, 4096])
                    cur_new_input_embeds.append(cur_frames_features)  # torch.Size([1024, 4096]); frame features appended to the input
                    if labels is not None:
                        cur_new_labels.append(cur_labels[:image_token_start])  # torch.Size([35]), all -100
                        cur_new_labels.append(torch.full((cur_frames_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))  # torch.Size([1024])
                        cur_labels = cur_labels[image_token_start+1:]  # 339 = 375-35-1(img_token); appended to cur_new_labels later
                cur_video_idx += 1
                if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):  # False
                    cur_input_ids = cur_input_ids[image_token_start+2:]
                else:
                    cur_input_ids = cur_input_ids[image_token_start+1:]  # torch.Size([339])
                image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]  # empty
            if cur_input_ids.numel() > 0:  # True
                if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):  # False
                    cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids).detach())
                else:
                    cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids))  # [torch.Size([35, 4096]) fixed template, torch.Size([1024, 4096]) image features, QA: torch.Size([339, 4096])]
                if labels is not None:
                    cur_new_labels.append(cur_labels)  # [torch.Size([35]), torch.Size([1024]) (all -100 up to here), torch.Size([339])]
            cur_new_input_embeds = [x.to(device=self.device) for x in cur_new_input_embeds]
            cur_new_input_embeds = torch.cat(cur_new_input_embeds, dim=0)  # torch.Size([1398, 4096]): 35+1024+339
            new_input_embeds.append(cur_new_input_embeds)
            if labels is not None:
                cur_new_labels = torch.cat(cur_new_labels, dim=0)  # torch.Size([1398])
                new_labels.append(cur_new_labels)

        if any(x.shape != new_input_embeds[0].shape for x in new_input_embeds):  # True
            max_len = max(x.shape[0] for x in new_input_embeds)  # 1910

            new_input_embeds_align = []
            for cur_new_embed in new_input_embeds:
                cur_new_embed = torch.cat((cur_new_embed, torch.zeros((max_len - cur_new_embed.shape[0], cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)), dim=0)
                new_input_embeds_align.append(cur_new_embed)
            new_input_embeds = torch.stack(new_input_embeds_align, dim=0)

            if labels is not None:
                new_labels_align = []
                _new_labels = new_labels
                for cur_new_label in new_labels:
                    cur_new_label = torch.cat((cur_new_label, torch.full((max_len - cur_new_label.shape[0],), IGNORE_INDEX, dtype=cur_new_label.dtype, device=cur_new_label.device)), dim=0)
                    new_labels_align.append(cur_new_label)
                new_labels = torch.stack(new_labels_align, dim=0)

            if attention_mask is not None:
                new_attention_mask = []
                for cur_attention_mask, cur_new_labels, cur_new_labels_align in zip(attention_mask, _new_labels, new_labels):
                    new_attn_mask_pad_left = torch.full((cur_new_labels.shape[0] - labels.shape[1],), True, dtype=attention_mask.dtype, device=attention_mask.device)
                    new_attn_mask_pad_right = torch.full((cur_new_labels_align.shape[0] - cur_new_labels.shape[0],), False, dtype=attention_mask.dtype, device=attention_mask.device)
                    cur_new_attention_mask = torch.cat((new_attn_mask_pad_left, cur_attention_mask, new_attn_mask_pad_right), dim=0)
                    new_attention_mask.append(cur_new_attention_mask)
                attention_mask = torch.stack(new_attention_mask, dim=0)
                assert attention_mask.shape == new_labels.shape
        else:  # False; in image mode all sequences share the fixed 256 length, so shapes are already equal
            # import pdb;pdb.set_trace()
            new_input_embeds = torch.stack(new_input_embeds, dim=0)  # torch.Size([4, 716, 4096]); 716 = 461 - 1 img token + 256 img features
            if labels is not None:  # torch.Size([4, 461])
                new_labels = torch.stack(new_labels, dim=0)  # torch.Size([4, 716])
            if attention_mask is not None:  # torch.Size([4, 461])
                new_attn_mask_pad_left = torch.full((attention_mask.shape[0], new_input_embeds.shape[1] - input_ids.shape[1]), True, dtype=attention_mask.dtype, device=attention_mask.device)  # torch.Size([4, 255]) of True, i.e. the 256 img features minus 1 img token
                attention_mask = torch.cat((new_attn_mask_pad_left, attention_mask), dim=1)  # torch.Size([4, 716]); 716 = 461 + 255 (the 255 newly added img-feature tokens get mask True)
                assert attention_mask.shape == new_input_embeds.shape[:2]

        return None, attention_mask, past_key_values, new_input_embeds, new_labels

    def initialize_vision_tokenizer(self, model_args, tokenizer):
        if model_args.mm_use_im_patch_token:  # False
# Copyright 2023 Haotian Liu # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. class OmniMetaModel: def __init__(self, config): super(OmniMetaModel, self).__init__(config) # import pdb;pdb.set_trace() if hasattr(config, "mm_vision_tower"): # train False, v1.5 continue finetune True self.vision_tower = build_vision_tower(config, delay_load=True) self.mm_projector = build_vision_projector(config) # import pdb;pdb.set_trace() if hasattr(config, "mm_video_fuser"): # self.frames_conv = nn.Conv2d(256, 256, kernel_size=(12,1), stride=(10,1)) # b 256 51 4096 self.frames_conv = nn.Conv2d(576, 576, kernel_size=(12,1), stride=(10,1)) # self.frames_conv = nn.Conv2d(256, 256, kernel_size=(12,1), stride=(10,1)) # b 256 51 4096 for exp1 test uncomment it def get_vision_tower(self): vision_tower = getattr(self, 'vision_tower', None) if type(vision_tower) is list: vision_tower = vision_tower[0] return vision_tower def initialize_vision_modules(self, model_args, fsdp=None): # Train vision_tower = model_args.vision_tower # 'openai/clip-vit-large-patch14' mm_vision_select_layer = model_args.mm_vision_select_layer # -2 mm_vision_select_feature = model_args.mm_vision_select_feature # patch pretrain_mm_mlp_adapter = model_args.pretrain_mm_mlp_adapter # '/home/wanghao/weights/llava/llava-pretrain-vicuna-7b-v1.3/mm_projector.bin' self.config.mm_vision_tower = vision_tower # import pdb;pdb.set_trace() # vision_tower = build_vision_tower(model_args) if self.get_vision_tower() is None: ## 初次fintune会走这,且require_grad=True,continue时fromepretrain已经有 vision_tower = build_vision_tower(model_args) if fsdp is not None and len(fsdp) > 0: self.vision_tower = [vision_tower] else: self.vision_tower = vision_tower else: ## Implement continue finetuning. 
if fsdp is not None and len(fsdp) > 0: vision_tower = self.vision_tower[0] else: vision_tower = self.vision_tower vision_tower.load_model() self.config.use_mm_proj = True self.config.mm_projector_type = getattr(model_args, 'mm_projector_type', 'linear') self.config.mm_hidden_size = vision_tower.hidden_size # 1024 self.config.mm_vision_select_layer = mm_vision_select_layer # -2 self.config.mm_vision_select_feature = mm_vision_select_feature # patch # self.mm_projector = build_vision_projector(self.config) # 1024->4096 if getattr(self, 'mm_projector', None) is None: ## 初次fintune会走这,且require_grad=True,continue时fromepretrain已经有 self.mm_projector = build_vision_projector(self.config) else: # In case it is frozen by LoRA for p in self.mm_projector.parameters(): p.requires_grad = True # import pdb;pdb.set_trace() if pretrain_mm_mlp_adapter is not None: mm_projector_weights = torch.load(pretrain_mm_mlp_adapter, map_location='cpu') def get_w(weights, keyword): return {k.split(keyword + '.')[1]: v for k, v in weights.items() if keyword in k} # weight:torch.Size([4096, 1024]) bias:torch.Size([4096]) # import pdb;pdb.set_trace() self.mm_projector.load_state_dict(get_w(mm_projector_weights, 'mm_projector')) # v1.5: mm_projector_weights['model.mm_projector.0.weight'].shape: torch.Size([4096, 1024]) # model.mm_projector.0.bias: torch.Size([4096]); model.mm_projector.2.weight: torch.Size([4096, 4096]); model.mm_projector.2.bias: torch.Size([4096]) if getattr(self, 'frames_conv', None) is None: ## Implement continue finetuning. # self.frames_attn = MultiheadAttention(256*4096, num_heads) # self.frames_conv = nn.Conv2d(4096, 4096, kernel_size=(12,1), stride=(10,1)) # b 4096 51 256 # self.frames_conv = nn.Conv2d(256, 256, kernel_size=(12,1), stride=(10,1)) # b 256 51 4096 self.frames_conv = nn.Conv2d(576, 576, kernel_size=(12,1), stride=(10,1)) # b 256 51 4096 # self.keyframes_attn = MultiheadAttention(256*4096, num_heads) # import pdb;pdb.set_trace() self.config.mm_video_fuser = 'frames_conv' class OmniMetaForCausalLM(ABC): @abstractmethod def get_model(self): pass def get_vision_tower(self): return self.get_model().get_vision_tower() def encode_frames(self, frames): frames_features = self.get_model().get_vision_tower()(frames) # torch.Size([276, 256, 1024]) frames_features = self.get_model().mm_projector(frames_features) # torch.Size([276, 256, 4096]) torch.float16 return frames_features def prepare_inputs_labels_for_multimodal( self, input_ids, attention_mask, past_key_values, labels, videos ): vision_tower = self.get_vision_tower() # import pdb;pdb.set_trace() # frames_attn = self.get_model().frames_attn frames_conv = self.get_model().frames_conv # keyframes_attn = self.get_model().keyframes_attn # import pdb;pdb.set_trace() if vision_tower is None or videos is None or input_ids.shape[1] == 1: # False if past_key_values is not None and vision_tower is not None and videos is not None and input_ids.shape[1] == 1: attention_mask = torch.ones((attention_mask.shape[0], past_key_values[-1][-1].shape[-2] + 1), dtype=attention_mask.dtype, device=attention_mask.device) return input_ids, attention_mask, past_key_values, None, labels # videos = [torch.Size([51, 3, 224, 224]), torch.Size([79, 3, 224, 224]), torch.Size([60, 3, 224, 224]), torch.Size([86, 3, 224, 224])] assert type(videos) is list or videos.ndim == 5 # True concat_frames = torch.cat([video for video in videos], dim=0) # torch.Size([79, 3, 336, 336]) # import pdb;pdb.set_trace() frames_features = self.encode_frames(concat_frames) # torch.Size([276, 
256, 4096]) torch.Size([79, 576, 4096]) split_sizes = [video.shape[0] for video in videos] # [51, 79, 60, 86] frames_features = torch.split(frames_features, split_sizes, dim=0) # (torch.Size([51, 256, 4096]), torch.Size([79, 256, 4096]), torch.Size([60, 256, 4096]), torch.Size([86, 256, 4096])) # import pdb;pdb.set_trace() # frames_features = [x.flatten(0, 1) for x in frames_features] key_frames_feature = [] for frame_feature in frames_features: # import pdb;pdb.set_trace() frame_feature = frame_feature.unsqueeze(0) # b 51 256 4096 frame_feature = frame_feature.permute(0,2,1,3) # b 256 51 4096 # short video if frame_feature.shape[2] >= 12: frame_feature = frames_conv(frame_feature) # torch.Size([1, 256, 4, 4096]) frame_feature = frame_feature.squeeze(0).permute(1,0,2) # torch.Size([4, 256, 4096]) # key_frames_feature.append(frame_feature[:6]) # import pdb;pdb.set_trace() num_frames = frame_feature.shape[0] key_frames_feature.append(frame_feature[::max(1,num_frames//5)][:6]) # v1.5 576 patch new_input_embeds = [] new_labels = [] if labels is not None else None cur_video_idx = 0 for batch_idx, cur_input_ids in enumerate(input_ids): # torch.Size([4, 375]) if (cur_input_ids == IMAGE_TOKEN_INDEX).sum() == 0: # 1 False # multimodal LLM, but the current sample is not multimodal # FIXME: this is a hacky fix, for deepspeed zero3 to work half_len = cur_input_ids.shape[0] // 2 cur_frames_features = key_frames_feature[cur_video_idx] cur_input_embeds_1 = self.get_model().embed_tokens(cur_input_ids[:half_len]) cur_input_embeds_2 = self.get_model().embed_tokens(cur_input_ids[half_len:]) # import pdb;pdb.set_trace() # cur_input_embeds = torch.cat([cur_input_embeds_1, cur_frames_features[0:0], cur_input_embeds_2], dim=0) cur_input_embeds = torch.cat([cur_input_embeds_1, cur_frames_features[0], cur_input_embeds_2], dim=0) # cur_input_embeds = torch.cat([cur_input_embeds_1, cur_input_embeds_2], dim=0) new_input_embeds.append(cur_input_embeds) if labels is not None: new_labels.append(labels[batch_idx]) cur_video_idx += 1 # import pdb;pdb.set_trace() # never enter it continue image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0] # (tensor([35], device='cuda:0'),) cur_new_input_embeds = [] if labels is not None: # torch.Size([4, 375]) cur_labels = labels[batch_idx] # torch.Size([375]): -100...labels...-100 cur_new_labels = [] assert cur_labels.shape == cur_input_ids.shape while image_token_indices.numel() > 0: # 统计元素个数 1 # import pdb;pdb.set_trace() # if cur_video_idx > len(key_frames_feature)-1: # cur_frames_features = key_frames_feature[-1] # for gradio demo # else: cur_frames_features = key_frames_feature[cur_video_idx] # torch.Size([4, 256, 4096]) cur_frames_features = cur_frames_features.reshape(-1,4096) # torch.Size([1024, 4096]) image_token_start = image_token_indices[0] # tensor(35, device='cuda:0') if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False): # False cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start-1]).detach()) cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start-1:image_token_start])) cur_new_input_embeds.append(cur_frames_features) cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start+1:image_token_start+2])) if labels is not None: cur_new_labels.append(cur_labels[:image_token_start]) cur_new_labels.append(torch.full((cur_frames_features.shape[0],), IGNORE_INDEX, device=labels.device, 
dtype=labels.dtype)) cur_new_labels.append(cur_labels[image_token_start:image_token_start+1]) cur_labels = cur_labels[image_token_start+2:] else: # True # import pdb;pdb.set_trace() cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start])) # instru部分的embed: torch.Size([35, 4096]) cur_new_input_embeds.append(cur_frames_features) # torch.Size([1024, 4096]) input加入frames特征 if labels is not None: cur_new_labels.append(cur_labels[:image_token_start]) # torch.Size([35]) 全-100 cur_new_labels.append(torch.full((cur_frames_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype)) # torch.Size([1024]) cur_labels = cur_labels[image_token_start+1:] # 339 = 375-35-1(img_token) 稍后加到cur_new_labels中 cur_video_idx += 1 if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False): # False cur_input_ids = cur_input_ids[image_token_start+2:] else: cur_input_ids = cur_input_ids[image_token_start+1:] # torch.Size([339]) image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0] # 空 if cur_input_ids.numel() > 0: # True if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False): # False cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids).detach()) else: cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids)) # [torch.Size([35, 4096])固定template,torch.Size([1024, 4096])图像特征, QA:torch.Size([339, 4096])] if labels is not None: cur_new_labels.append(cur_labels) # [torch.Size([35]),torch.Size([1024]), 前面全为-100 torch.Size([339])] cur_new_input_embeds = [x.to(device=self.device) for x in cur_new_input_embeds] cur_new_input_embeds = torch.cat(cur_new_input_embeds, dim=0) # torch.Size([1398, 4096]): 35+1024+339 new_input_embeds.append(cur_new_input_embeds) if labels is not None: cur_new_labels = torch.cat(cur_new_labels, dim=0) # torch.Size([1398]) new_labels.append(cur_new_labels) if any(x.shape != new_input_embeds[0].shape for x in new_input_embeds): # True max_len = max(x.shape[0] for x in new_input_embeds) # 1910 new_input_embeds_align = [] for cur_new_embed in new_input_embeds: cur_new_embed = torch.cat((cur_new_embed, torch.zeros((max_len - cur_new_embed.shape[0], cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)), dim=0) new_input_embeds_align.append(cur_new_embed) new_input_embeds = torch.stack(new_input_embeds_align, dim=0) if labels is not None: new_labels_align = [] _new_labels = new_labels for cur_new_label in new_labels: cur_new_label = torch.cat((cur_new_label, torch.full((max_len - cur_new_label.shape[0],), IGNORE_INDEX, dtype=cur_new_label.dtype, device=cur_new_label.device)), dim=0) new_labels_align.append(cur_new_label) new_labels = torch.stack(new_labels_align, dim=0) if attention_mask is not None: new_attention_mask = [] for cur_attention_mask, cur_new_labels, cur_new_labels_align in zip(attention_mask, _new_labels, new_labels): new_attn_mask_pad_left = torch.full((cur_new_labels.shape[0] - labels.shape[1],), True, dtype=attention_mask.dtype, device=attention_mask.device) new_attn_mask_pad_right = torch.full((cur_new_labels_align.shape[0] - cur_new_labels.shape[0],), False, dtype=attention_mask.dtype, device=attention_mask.device) cur_new_attention_mask = torch.cat((new_attn_mask_pad_left, cur_attention_mask, new_attn_mask_pad_right), dim=0) new_attention_mask.append(cur_new_attention_mask) attention_mask = torch.stack(new_attention_mask, dim=0) assert 
attention_mask.shape == new_labels.shape else: # False img模式默认只有256长度相等 # import pdb;pdb.set_trace() new_input_embeds = torch.stack(new_input_embeds, dim=0) # torch.Size([4, 716, 4096]) 716=461-1imgtoken+256imgfeature if labels is not None: # torch.Size([4, 461]) new_labels = torch.stack(new_labels, dim=0) # torch.Size([4, 716]) if attention_mask is not None: # torch.Size([4, 461]) new_attn_mask_pad_left = torch.full((attention_mask.shape[0], new_input_embeds.shape[1] - input_ids.shape[1]), True, dtype=attention_mask.dtype, device=attention_mask.device) # torch.Size([4, 255]个True 相当于256个img特征-1个imgtoken attention_mask = torch.cat((new_attn_mask_pad_left, attention_mask), dim=1) # torch.Size([4, 716]) 716=461+255(新加入的img特征255个token mask为True) assert attention_mask.shape == new_input_embeds.shape[:2] return None, attention_mask, past_key_values, new_input_embeds, new_labels def initialize_vision_tokenizer(self, model_args, tokenizer): if model_args.mm_use_im_patch_token: # False
tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)
4
2023-12-05 08:02:17+00:00
8k
OpenDriveLab/LaneSegNet
projects/lanesegnet/datasets/openlanev2_subset_A_lanesegnet_dataset.py
[ { "identifier": "lanesegnet_evaluate", "path": "projects/lanesegnet/datasets/openlanev2_evaluate_custom.py", "snippet": "def lanesegnet_evaluate(ground_truth, predictions, verbose=True):\n\n if isinstance(ground_truth, str):\n ground_truth = io.pickle_load(ground_truth)\n\n if predictions is None:\n preds = {}\n print('\\nDummy evaluation on ground truth.\\n')\n else:\n if isinstance(predictions, str):\n predictions = io.pickle_load(predictions)\n predictions = predictions['results']\n\n gts = {}\n preds = {}\n for token in ground_truth.keys():\n gts[token] = ground_truth[token]['annotation']\n if predictions is None:\n preds[token] = gts[token]\n for i, _ in enumerate(preds[token]['lane_segment']):\n preds[token]['lane_segment'][i]['confidence'] = np.float32(1)\n for i, _ in enumerate(preds[token]['area']):\n preds[token]['area'][i]['confidence'] = np.float32(1)\n for i, _ in enumerate(preds[token]['traffic_element']):\n preds[token]['traffic_element'][i]['confidence'] = np.float32(1)\n else:\n preds[token] = predictions[token]['predictions']\n\n assert set(gts.keys()) == set(preds.keys()), '#frame differs'\n\n \"\"\"\n calculate distances between gts and preds \n \"\"\"\n\n distance_matrices = {\n 'laneseg': {},\n 'area': {},\n }\n\n for token in tqdm(gts.keys(), desc='calculating distances:', ncols=80, disable=not verbose):\n\n mask = pairwise(\n [gt for gt in gts[token]['lane_segment']],\n [pred for pred in preds[token]['lane_segment']],\n lane_segment_distance_c,\n relax=True,\n ) < THRESHOLDS_LANESEG[-1]\n\n distance_matrices['laneseg'][token] = pairwise(\n [gt for gt in gts[token]['lane_segment']],\n [pred for pred in preds[token]['lane_segment']],\n lane_segment_distance,\n mask=mask,\n relax=True,\n )\n\n distance_matrices['area'][token] = pairwise(\n [gt for gt in gts[token]['area']],\n [pred for pred in preds[token]['area']],\n area_distance,\n )\n\n \"\"\"\n evaluate\n \"\"\"\n\n metrics = {\n 'mAP': 0\n }\n\n metrics['AP_ls'] = _mAP_over_threshold(\n gts=gts, \n preds=preds, \n distance_matrices=distance_matrices['laneseg'], \n distance_thresholds=THRESHOLDS_LANESEG,\n object_type='lane_segment',\n filter=lambda _: True,\n inject=True, # save tp for eval on graph\n ).mean()\n\n metrics['AP_ped'] = _mAP_over_threshold(\n gts=gts, \n preds=preds, \n distance_matrices=distance_matrices['area'], \n distance_thresholds=THRESHOLDS_AREA, \n object_type='area',\n filter=lambda x: x['category'] == 1,\n inject=False,\n ).mean()\n\n metrics['TOP_lsls'] = _mAP_topology_lsls(gts, preds, THRESHOLDS_LANESEG)\n\n metrics['mAP'] = (metrics['AP_ls'] + metrics['AP_ped']) / 2\n\n return metrics" }, { "identifier": "fix_pts_interpolate", "path": "projects/lanesegnet/core/lane/util.py", "snippet": "def fix_pts_interpolate(lane, n_points):\n ls = LineString(lane)\n distances = np.linspace(0, ls.length, n_points)\n lane = np.array([ls.interpolate(distance).coords[0] for distance in distances])\n return lane" } ]
import os
import random
import copy
import numpy as np
import torch
import mmcv
import cv2
import shapely
from shapely.geometry import LineString
from pyquaternion import Quaternion
from mmcv.parallel import DataContainer as DC
from mmdet.datasets import DATASETS
from mmdet3d.datasets import Custom3DDataset
from .openlanev2_evaluate_custom import lanesegnet_evaluate
from ..core.lane.util import fix_pts_interpolate
3,726
right_boundary = lane['right_laneline'] LineString_right_boundary = LineString(right_boundary) gt_lanes.append([LineString_lane, LineString_left_boundary, LineString_right_boundary]) gt_lane_labels_3d.append(0) gt_lane_left_type.append(lane['left_laneline_type']) gt_lane_right_type.append(lane['right_laneline_type']) for area in ann_info['area']: if area['category'] == 1 and 'ped_crossing' in self.LANE_CLASSES: centerline, left_boundary, right_boundary = self.ped2lane_segment(area['points']) gt_lanes.append([centerline, left_boundary, right_boundary]) gt_lane_labels_3d.append(1) gt_lane_left_type.append(0) gt_lane_right_type.append(0) elif area['category'] == 2 and 'road_boundary' in self.LANE_CLASSES: raise NotImplementedError topology_lsls = np.array(ann_info['topology_lsls'], dtype=np.float32) te_bboxes = np.array([np.array(sign['points'], dtype=np.float32).flatten() for sign in ann_info['traffic_element']]) te_labels = np.array([sign['attribute'] for sign in ann_info['traffic_element']], dtype=np.int64) if len(te_bboxes) == 0: te_bboxes = np.zeros((0, 4), dtype=np.float32) te_labels = np.zeros((0, ), dtype=np.int64) topology_lste = np.array(ann_info['topology_lste'], dtype=np.float32) annos = dict( gt_lanes_3d = gt_lanes, gt_lane_labels_3d = gt_lane_labels_3d, gt_lane_adj = topology_lsls, bboxes = te_bboxes, labels = te_labels, gt_lane_lcte_adj = topology_lste, gt_lane_left_type = gt_lane_left_type, gt_lane_right_type = gt_lane_right_type, ) return annos def prepare_train_data(self, index): data_queue = [] # temporal aug prev_indexs_list = list(range(index-self.queue_length, index)) random.shuffle(prev_indexs_list) prev_indexs_list = sorted(prev_indexs_list[1:], reverse=True) input_dict = self.get_data_info(index) if input_dict is None: return None sample_idx = input_dict['sample_idx'] scene_token = input_dict['scene_token'] self.pre_pipeline(input_dict) example = self.pipeline(input_dict) if self.filter_empty_gt and \ (example is None or len(example['gt_lane_labels_3d']._data) == 0): return None if self.filter_empty_te and \ (example is None or len(example['gt_labels']._data) == 0): return None data_queue.insert(0, example) for i in prev_indexs_list: i = max(0, i) input_dict = self.get_data_info(i) if input_dict is None: return None if input_dict['sample_idx'] < sample_idx and input_dict['scene_token'] == scene_token: self.pre_pipeline(input_dict) example = self.pipeline(input_dict) if self.filter_empty_gt and \ (example is None or len(example['gt_lane_labels_3d']._data) == 0): return None sample_idx = input_dict['sample_idx'] data_queue.insert(0, copy.deepcopy(example)) return self.union2one(data_queue) def union2one(self, queue): """ convert sample queue into one single sample. 
""" imgs_list = [each['img'].data for each in queue] metas_map = {} prev_pos = None prev_angle = None for i, each in enumerate(queue): metas_map[i] = each['img_metas'].data if i == 0: metas_map[i]['prev_bev'] = False prev_pos = copy.deepcopy(metas_map[i]['can_bus'][:3]) prev_angle = copy.deepcopy(metas_map[i]['can_bus'][-1]) metas_map[i]['can_bus'][:3] = 0 metas_map[i]['can_bus'][-1] = 0 else: metas_map[i]['prev_bev'] = True tmp_pos = copy.deepcopy(metas_map[i]['can_bus'][:3]) tmp_angle = copy.deepcopy(metas_map[i]['can_bus'][-1]) metas_map[i]['can_bus'][:3] -= prev_pos metas_map[i]['can_bus'][-1] -= prev_angle prev_pos = copy.deepcopy(tmp_pos) prev_angle = copy.deepcopy(tmp_angle) queue[-1]['img'] = DC(torch.stack(imgs_list), cpu_only=False, stack=True) queue[-1]['img_metas'] = DC(metas_map, cpu_only=True) queue = queue[-1] return queue def format_openlanev2_gt(self): gt_dict = {} for idx in range(len(self.data_infos)): info = copy.deepcopy(self.data_infos[idx]) key = (self.split, info['segment_id'], str(info['timestamp'])) areas = [] for area in info['annotation']['area']: if area['category'] == 1: points = area['points']
#---------------------------------------------------------------------------------------# # LaneSegNet: Map Learning with Lane Segment Perception for Autonomous Driving # # Source code: https://github.com/OpenDriveLab/LaneSegNet # # Copyright (c) OpenDriveLab. All rights reserved. # #---------------------------------------------------------------------------------------# @DATASETS.register_module() class OpenLaneV2_subset_A_LaneSegNet_Dataset(Custom3DDataset): CAMS = ('ring_front_center', 'ring_front_left', 'ring_front_right', 'ring_rear_left', 'ring_rear_right', 'ring_side_left', 'ring_side_right') LANE_CLASSES = ('lane_segment', 'ped_crossing', 'road_boundary') TE_CLASSES = ('traffic_light', 'road_sign') TE_ATTR_CLASSES = ('unknown', 'red', 'green', 'yellow', 'go_straight', 'turn_left', 'turn_right', 'no_left_turn', 'no_right_turn', 'u_turn', 'no_u_turn', 'slight_left', 'slight_right') MAP_CHANGE_LOGS = [ '75e8adad-50a6-3245-8726-5e612db3d165', '54bc6dbc-ebfb-3fba-b5b3-57f88b4b79ca', 'af170aac-8465-3d7b-82c5-64147e94af7d', '6e106cf8-f6dd-38f6-89c8-9be7a71e7275', ] def __init__(self, data_root, ann_file, queue_length=1, filter_empty_te=False, filter_map_change=False, points_num=10, split='train', **kwargs): self.filter_map_change = filter_map_change self.split = split super().__init__(data_root, ann_file, **kwargs) self.queue_length = queue_length self.filter_empty_te = filter_empty_te self.points_num = points_num self.LANE_CLASSES = self.CLASSES def load_annotations(self, ann_file): """Load annotation from a olv2 pkl file. Args: ann_file (str): Path of the annotation file. Returns: list[dict]: Annotation info from the json file. """ data_infos = mmcv.load(ann_file, file_format='pkl') if isinstance(data_infos, dict): if self.filter_map_change and self.split == 'train': data_infos = [info for info in data_infos.values() if info['meta_data']['source_id'] not in self.MAP_CHANGE_LOGS] else: data_infos = list(data_infos.values()) return data_infos def get_data_info(self, index): """Get data info according to the given index. Args: index (int): Index of the sample data to get. Returns: dict: Data information that will be passed to the data \ preprocessing pipelines. 
""" info = self.data_infos[index] input_dict = dict( sample_idx=info['timestamp'], scene_token=info['segment_id'] ) if self.modality['use_camera']: image_paths = [] lidar2img_rts = [] lidar2cam_rts = [] cam_intrinsics = [] for cam_name, cam_info in info['sensor'].items(): image_path = cam_info['image_path'] image_paths.append(os.path.join(self.data_root, image_path)) # obtain lidar to image transformation matrix lidar2cam_r = np.linalg.inv(cam_info['extrinsic']['rotation']) lidar2cam_t = cam_info['extrinsic']['translation'] @ lidar2cam_r.T lidar2cam_rt = np.eye(4) lidar2cam_rt[:3, :3] = lidar2cam_r.T lidar2cam_rt[3, :3] = -lidar2cam_t intrinsic = np.array(cam_info['intrinsic']['K']) viewpad = np.eye(4) viewpad[:intrinsic.shape[0], :intrinsic.shape[1]] = intrinsic lidar2img_rt = (viewpad @ lidar2cam_rt.T) lidar2img_rts.append(lidar2img_rt) cam_intrinsics.append(viewpad) lidar2cam_rts.append(lidar2cam_rt.T) input_dict.update( dict( img_filename=image_paths, lidar2img=lidar2img_rts, cam_intrinsic=cam_intrinsics, lidar2cam=lidar2cam_rts, )) if not self.test_mode: annos = self.get_ann_info(index) input_dict['ann_info'] = annos if self.filter_empty_gt and len(annos['gt_lane_labels_3d']) == 0: return None if self.filter_empty_te and len(annos['labels']) == 0: return None can_bus = np.zeros(18) rotation = Quaternion._from_matrix(np.array(info['pose']['rotation'])) can_bus[:3] = info['pose']['translation'] can_bus[3:7] = rotation patch_angle = rotation.yaw_pitch_roll[0] / np.pi * 180 if patch_angle < 0: patch_angle += 360 can_bus[-2] = patch_angle / 180 * np.pi can_bus[-1] = patch_angle input_dict['can_bus'] = can_bus input_dict['lidar2global_rotation'] = np.array(info['pose']['rotation']) return input_dict def ped2lane_segment(self, points): assert points.shape[0] == 5 dir_vector = points[1] - points[0] dir = np.rad2deg(np.arctan2(dir_vector[1], dir_vector[0])) if dir < -45 or dir > 135: left_boundary = points[[2, 3]] right_boundary = points[[1, 0]] else: left_boundary = points[[0, 1]] right_boundary = points[[3, 2]] centerline = LineString((left_boundary + right_boundary) / 2) left_boundary = LineString(left_boundary) right_boundary = LineString(right_boundary) return centerline, left_boundary, right_boundary def get_ann_info(self, index): """Get annotation info according to the given index. Args: index (int): Index of the annotation data to get. 
Returns: dict: annotation information """ info = self.data_infos[index] ann_info = info['annotation'] gt_lanes = [] gt_lane_labels_3d = [] gt_lane_left_type = [] gt_lane_right_type = [] for idx, lane in enumerate(ann_info['lane_segment']): centerline = lane['centerline'] LineString_lane = LineString(centerline) left_boundary = lane['left_laneline'] LineString_left_boundary = LineString(left_boundary) right_boundary = lane['right_laneline'] LineString_right_boundary = LineString(right_boundary) gt_lanes.append([LineString_lane, LineString_left_boundary, LineString_right_boundary]) gt_lane_labels_3d.append(0) gt_lane_left_type.append(lane['left_laneline_type']) gt_lane_right_type.append(lane['right_laneline_type']) for area in ann_info['area']: if area['category'] == 1 and 'ped_crossing' in self.LANE_CLASSES: centerline, left_boundary, right_boundary = self.ped2lane_segment(area['points']) gt_lanes.append([centerline, left_boundary, right_boundary]) gt_lane_labels_3d.append(1) gt_lane_left_type.append(0) gt_lane_right_type.append(0) elif area['category'] == 2 and 'road_boundary' in self.LANE_CLASSES: raise NotImplementedError topology_lsls = np.array(ann_info['topology_lsls'], dtype=np.float32) te_bboxes = np.array([np.array(sign['points'], dtype=np.float32).flatten() for sign in ann_info['traffic_element']]) te_labels = np.array([sign['attribute'] for sign in ann_info['traffic_element']], dtype=np.int64) if len(te_bboxes) == 0: te_bboxes = np.zeros((0, 4), dtype=np.float32) te_labels = np.zeros((0, ), dtype=np.int64) topology_lste = np.array(ann_info['topology_lste'], dtype=np.float32) annos = dict( gt_lanes_3d = gt_lanes, gt_lane_labels_3d = gt_lane_labels_3d, gt_lane_adj = topology_lsls, bboxes = te_bboxes, labels = te_labels, gt_lane_lcte_adj = topology_lste, gt_lane_left_type = gt_lane_left_type, gt_lane_right_type = gt_lane_right_type, ) return annos def prepare_train_data(self, index): data_queue = [] # temporal aug prev_indexs_list = list(range(index-self.queue_length, index)) random.shuffle(prev_indexs_list) prev_indexs_list = sorted(prev_indexs_list[1:], reverse=True) input_dict = self.get_data_info(index) if input_dict is None: return None sample_idx = input_dict['sample_idx'] scene_token = input_dict['scene_token'] self.pre_pipeline(input_dict) example = self.pipeline(input_dict) if self.filter_empty_gt and \ (example is None or len(example['gt_lane_labels_3d']._data) == 0): return None if self.filter_empty_te and \ (example is None or len(example['gt_labels']._data) == 0): return None data_queue.insert(0, example) for i in prev_indexs_list: i = max(0, i) input_dict = self.get_data_info(i) if input_dict is None: return None if input_dict['sample_idx'] < sample_idx and input_dict['scene_token'] == scene_token: self.pre_pipeline(input_dict) example = self.pipeline(input_dict) if self.filter_empty_gt and \ (example is None or len(example['gt_lane_labels_3d']._data) == 0): return None sample_idx = input_dict['sample_idx'] data_queue.insert(0, copy.deepcopy(example)) return self.union2one(data_queue) def union2one(self, queue): """ convert sample queue into one single sample. 
""" imgs_list = [each['img'].data for each in queue] metas_map = {} prev_pos = None prev_angle = None for i, each in enumerate(queue): metas_map[i] = each['img_metas'].data if i == 0: metas_map[i]['prev_bev'] = False prev_pos = copy.deepcopy(metas_map[i]['can_bus'][:3]) prev_angle = copy.deepcopy(metas_map[i]['can_bus'][-1]) metas_map[i]['can_bus'][:3] = 0 metas_map[i]['can_bus'][-1] = 0 else: metas_map[i]['prev_bev'] = True tmp_pos = copy.deepcopy(metas_map[i]['can_bus'][:3]) tmp_angle = copy.deepcopy(metas_map[i]['can_bus'][-1]) metas_map[i]['can_bus'][:3] -= prev_pos metas_map[i]['can_bus'][-1] -= prev_angle prev_pos = copy.deepcopy(tmp_pos) prev_angle = copy.deepcopy(tmp_angle) queue[-1]['img'] = DC(torch.stack(imgs_list), cpu_only=False, stack=True) queue[-1]['img_metas'] = DC(metas_map, cpu_only=True) queue = queue[-1] return queue def format_openlanev2_gt(self): gt_dict = {} for idx in range(len(self.data_infos)): info = copy.deepcopy(self.data_infos[idx]) key = (self.split, info['segment_id'], str(info['timestamp'])) areas = [] for area in info['annotation']['area']: if area['category'] == 1: points = area['points']
left_boundary = fix_pts_interpolate(points[[0, 1]], 10)
1
2023-12-06 07:13:48+00:00
8k
RobertCsordas/moe_attention
dataset/text/chunked_setencepiece_lm_dataset.py
[ { "identifier": "SentencepieceVocabulary", "path": "dataset/text/tokenizers/sentencepiece.py", "snippet": "class SentencepieceVocabulary:\n def __init__(self, path: str, train_data: Union[str, Iterator], vocab_size: int):\n global spm\n import sentencepiece as spm\n\n model_file = path + \".model\"\n\n if not os.path.exists(model_file):\n if isinstance(train_data, str):\n spm.SentencePieceTrainer.train(input=train_data, model_prefix=path, vocab_size=vocab_size, split_digits=True, model_type=\"bpe\")\n else:\n spm.SentencePieceTrainer.train(sentence_iterator=train_data, model_prefix=path, vocab_size=vocab_size, split_digits=True, model_type=\"bpe\")\n\n self.path = path\n self.tokenizer = spm.SentencePieceProcessor()\n self.tokenizer.load(model_file)\n pass\n\n def __len__(self) -> int:\n return self.tokenizer.get_piece_size()\n\n def state_dict(self) -> Dict[str, Any]:\n return {}\n\n def load_state_dict(self, state: Dict[str, Any]):\n pass\n\n def indices_to_sentence(self, indices: List[int]) -> List[str]:\n return [self.tokenizer.IdToPiece(i) for i in indices]\n\n def sentence_to_indices(self, sentence: str) -> List[int]:\n return self.tokenizer.encode_as_ids(sentence)\n\n def __call__(self, seq: Union[List[Union[str, int]], str]) -> List[Union[int, str]]:\n if seq is None or (isinstance(seq, list) and not seq):\n return seq\n\n if isinstance(seq, str) or isinstance(seq[0], str):\n return self.sentence_to_indices(seq)\n else:\n return self.indices_to_sentence(seq)\n\n def to_string(self, seq: List[int]) -> str:\n return self.tokenizer.decode_ids(seq)" }, { "identifier": "UrlStream", "path": "framework/utils/download.py", "snippet": "class UrlStream:\n def __init__(self, url):\n self._url = url\n headers = requests.head(url, headers={\"Accept-Encoding\": \"identity\"}).headers\n headers = {k.lower(): v for k, v in headers.items()}\n self._seek_supported = headers.get('accept-ranges') == 'bytes' and 'content-length' in headers\n if self._seek_supported:\n self._size = int(headers['content-length'])\n self._curr_pos = 0\n self._buf_start_pos = 0\n self._iter = None\n self._buffer = None\n self._buf_size = 0\n self._loaded_all = False\n\n def _load_all(self):\n if self._loaded_all:\n return\n self._make_request()\n old_buf_pos = self._buffer.tell()\n self._buffer.seek(0, SEEK_END)\n for chunk in self._iter:\n self._buffer.write(chunk)\n self._buf_size = self._buffer.tell()\n self._buffer.seek(old_buf_pos, SEEK_SET)\n self._loaded_all = True\n\n def seekable(self):\n return self._seek_supported\n\n def seek(self, position, whence=SEEK_SET):\n if whence == SEEK_END:\n assert position <= 0\n if self._seek_supported:\n self.seek(self._size + position)\n else:\n self._load_all()\n self._buffer.seek(position, SEEK_END)\n self._curr_pos = self._buffer.tell()\n elif whence == SEEK_SET:\n if self._curr_pos != position:\n self._curr_pos = position\n if self._seek_supported:\n self._iter = None\n self._buffer = None\n else:\n self._load_until(position)\n self._buffer.seek(position)\n self._curr_pos = position\n else:\n assert \"Invalid whence %s\" % whence\n\n return self.tell()\n\n def tell(self):\n return self._curr_pos\n\n def _load_until(self, goal_position):\n self._make_request()\n old_buf_pos = self._buffer.tell()\n current_position = self._buffer.seek(0, SEEK_END)\n\n goal_position = goal_position - self._buf_start_pos\n while current_position < goal_position:\n try:\n d = next(self._iter)\n self._buffer.write(d)\n current_position += len(d)\n except StopIteration:\n break\n self._buf_size = 
current_position\n self._buffer.seek(old_buf_pos, SEEK_SET)\n\n def _new_buffer(self):\n remaining = self._buffer.read() if self._buffer is not None else None\n self._buffer = BytesIO()\n if remaining is not None:\n self._buffer.write(remaining)\n self._buf_start_pos = self._curr_pos\n self._buf_size = 0 if remaining is None else len(remaining)\n self._buffer.seek(0, SEEK_SET)\n self._loaded_all = False\n\n def _make_request(self):\n if self._iter is None:\n h = {\n \"User-agent\": \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.80 Safari/537.36\",\n }\n if self._seek_supported:\n h[\"Range\"] = \"bytes=%d-%d\" % (self._curr_pos, self._size - 1)\n\n r = requests.get(self._url, headers=h, stream=True)\n\n self._iter = r.iter_content(1024 * 1024)\n self._new_buffer()\n elif self._seek_supported and self._buf_size > 128 * 1024 * 1024:\n self._new_buffer()\n\n def size(self):\n if self._seek_supported:\n return self._size\n else:\n self._load_all()\n return self._buf_size\n\n def read(self, size=None):\n if size is None:\n size = self.size()\n\n self._load_until(self._curr_pos + size)\n if self._seek_supported:\n self._curr_pos = min(self._curr_pos + size, self._size)\n\n read_data = self._buffer.read(size)\n if not self._seek_supported:\n self._curr_pos += len(read_data)\n return read_data\n\n def iter_content(self, block_size):\n while True:\n d = self.read(block_size)\n if not len(d):\n break\n yield d" }, { "identifier": "LockFile", "path": "framework/utils/lockfile.py", "snippet": "class LockFile:\n def __init__(self, fname: str):\n self._fname = fname\n self._fd = None\n\n def acquire(self):\n self._fd=open(self._fname, \"w\")\n try:\n os.chmod(self._fname, 0o777)\n except PermissionError:\n # If another user created it already, we don't have the permission to change the access rights.\n # But it can be ignored because the creator already set it right.\n pass\n\n while True:\n try:\n fcntl.flock(self._fd, fcntl.LOCK_EX | fcntl.LOCK_NB)\n break\n except IOError as e:\n if e.errno != errno.EAGAIN:\n raise\n else:\n time.sleep(0.1)\n\n def release(self):\n fcntl.flock(self._fd, fcntl.LOCK_UN)\n self._fd.close()\n self._fd = None\n\n def __enter__(self):\n self.acquire()\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.release()" }, { "identifier": "WordLevelLanguageModelTestState", "path": "dataset/text/lm_dataset.py", "snippet": "class WordLevelLanguageModelTestState(CharLevelLanguageModelTestState):\n def plot(self) -> Dict[str, Any]:\n loss = self.loss_sum / self.n_total\n bpc = np.exp(loss)\n return {\n \"loss\": loss,\n \"accuracy\": self.accuracy,\n \"perplexity\": bpc\n }" } ]
from .tokenizers.sentencepiece import SentencepieceVocabulary
from framework.utils.download import UrlStream
from framework.utils import LockFile
from typing import List, Optional, Dict, Any
from .lm_dataset import WordLevelLanguageModelTestState
import gzip
import json
import numpy as np
import os
import bisect
import time
import torch.multiprocessing as mp
import math
3,866
chunk_sizes = [] for i in range(self._n_chunks): fn = self._chunk_fname(i) if os.path.exists(fn): chunk_sizes.append(os.path.getsize(fn) // self.data_dtype(0).itemsize) else: break return chunk_sizes def get_ready_tokens(self) -> int: return sum(self.get_chunk_sizes()) def __init__(self, unroll_len: int, n_extra: int = 1, split: str = 'train', cache_dir: str = "./cache/", n_tokens: int = 8000, token_limit: Optional[int] = None) -> None: self.split = split self.n_tokens = n_tokens self.unroll_len = unroll_len self.n_extra = n_extra self.update_data_type() self._cache_dir = os.path.join(cache_dir, self.__class__.__name__, self._get_variant_id()) self._chunk_dir = os.path.join(self._cache_dir, "tokenized_chunks", split) self._n_chunks = self.get_n_shards() self.chunk_sizes = [0] * self._n_chunks self.chunk_offsets = [0] * self._n_chunks self.chunk_mmap = [None] * self._n_chunks self.last_available_chunk = -1 self.last_accessed_chunk = -1 self.token_limit = int(math.ceil(token_limit)) if token_limit is not None else None os.makedirs(self._chunk_dir, exist_ok=True) self._sp_model_name = os.path.join(self._cache_dir, "tokenizer.model") with LockFile(self._cache_dir + "/lock"): self.vocabulary = SentencepieceVocabulary(self._sp_model_name, GenToIt(self.get_tokenizer_train_sentences), n_tokens) print(f"{self.__class__.__name__}: Loaded tokenizer.") missing = [i for i in range(self._n_chunks) if not os.path.exists(self._chunk_fname(i))] print(f"{self.__class__.__name__}: {len(missing)} chunks missing") if missing: if token_limit is not None: pool = mp.Pool(min(mp.cpu_count(), len(missing))) while True: tokens_ready = self.get_ready_tokens() if tokens_ready >= token_limit: print("Token limit reached. No need to tokenize more.") break print(f"{self.__class__.__name__}: {tokens_ready/token_limit*100:.2f}% ready.") chunks_ready = len(self.get_chunk_sizes()) if chunks_ready == 0: print("Tokenizing first chunk to estimate the number of required chunks...") pool.map(self.tokenize_chunk, [0]) continue elif chunks_ready >= self._n_chunks: print("All chunks ready. 
No need to tokenize more.") break n_estimated = int(math.ceil(chunks_ready * (token_limit / tokens_ready))) print(f"{self.__class__.__name__}: Tokenizing {n_estimated} estimated chunks...") pool.map(self.tokenize_chunk, [a for a in range(chunks_ready, n_estimated) if a in missing]) print(f"Limiting to {token_limit} tokens") missing = missing[:token_limit // self.unroll_len] del pool else: mp.Pool(min(mp.cpu_count(), len(missing))).map(self.tokenize_chunk, missing) self.chunk_sizes = self.get_chunk_sizes() self.chunk_offsets = self.chunk_offsets[:len(self.chunk_sizes)] self.chunk_mmap = self.chunk_mmap[:len(self.chunk_sizes)] lim_found = False for i in range(1, len(self.chunk_sizes)): self.chunk_offsets[i] = self.chunk_offsets[i - 1] + self.chunk_sizes[i] if self.token_limit is not None and not lim_found and self.chunk_offsets[i] >= self.token_limit: print(f"{self.__class__.__name__}: Limiting to first {i} chunks because limited to {self.token_limit} tokens") lim_found = True def __len__(self): l = self.linear_len() if self.token_limit is not None: l = min(l, self.token_limit) return l // self.unroll_len def linear_len(self): return self.chunk_sizes[-1] + self.chunk_offsets[-1] def get_linear(self, offset: int, clen: int): chunk_index = bisect.bisect(self.chunk_offsets, offset) - 1 chunk_offset = offset - self.chunk_offsets[chunk_index] self.do_mmap(chunk_index) if chunk_offset + clen > self.chunk_sizes[chunk_index]: # Wrapping over chunk boundary next_chunk = (chunk_index + 1) % len(self.chunk_sizes) self.do_mmap(next_chunk) d1 = self.chunk_mmap[chunk_index][chunk_offset:] d2 = self.chunk_mmap[next_chunk][:clen-len(d1)] r = np.concatenate([d1, d2]) else: r = self.chunk_mmap[chunk_index][chunk_offset:chunk_offset+clen] assert r.shape[0] == clen return r def __getitem__(self, item: int) -> Dict[str, Any]: return { "data": self.get_linear(item * self.unroll_len, self.unroll_len + self.n_extra) }
# Based on https://huggingface.co/datasets/c4/blob/main/c4.py class GenToIt: def __init__(self, gen, *args, **kwargs): self.gen = gen self.args = args self.kwargs = kwargs self.gen_inst = None self.__iter__() self.initialized = False def __iter__(self): assert (self.gen_inst is None) or (self.initialized == False) self.initialized = True self.gen_inst = self.gen(*self.args, **self.kwargs) return self def __next__(self): try: n = next(self.gen_inst) return n except StopIteration: self.gen_inst = None raise class ChunkedSentencepieceLMDataset: TOKENIZER_N_FILES = 10 def _get_variant_id(self) -> str: return f"{self.__class__.__name__}-{self.n_tokens}" def gzip_line_iterator(self, url: str): stream = UrlStream(url) print(f"Opening shard {url}, size {stream.size()}") for l in gzip.GzipFile(fileobj=stream): txt = json.loads(l.decode("utf-8"))["text"] if txt: yield txt + "<STORY_SEP>" def get_url(self, index: int, split: Optional[str] = None) -> str: raise NotImplementedError() def get_n_shards(self, split: Optional[str] = None) -> int: raise NotImplementedError() def get_tokenizer_train_sentences(self): n_files = min(self.TOKENIZER_N_FILES, self.get_n_shards("train")) for i in range(n_files): url = self.get_url(i, "train") for txt in self.gzip_line_iterator(url): yield txt def _chunk_fname(self, index: int) -> str: return os.path.join(self._chunk_dir, f"chunk_{index}.bin") def tokenize_chunk(self, chunk_index): fname = self._chunk_fname(chunk_index) if not os.path.exists(fname): print(f"Tokenizing chunk {chunk_index}...") url = self.get_url(chunk_index) with open(fname+".tmp", "wb") as out_f: for l in self.gzip_line_iterator(url): np.asarray(self.vocabulary(l), dtype=self.data_dtype).tofile(out_f) os.rename(fname+".tmp", fname) print(f"Tokenizing chunk {chunk_index} done.") def do_mmap(self, index: int): if self.chunk_mmap[index] is None: self.chunk_mmap[index] = np.memmap(self._chunk_fname(index), dtype=self.data_dtype, mode='r') def update_data_type(self): # Avoid unnecessary copying if self.n_tokens >= 2**31 - 1: self.data_dtype = np.int64 elif self.n_tokens >= 2**15 - 1: self.data_dtype = np.int32 elif self.n_tokens >= 2**8: self.data_dtype = np.int16 else: self.data_dtype = np.uint8 def get_chunk_sizes(self) -> List[int]: chunk_sizes = [] for i in range(self._n_chunks): fn = self._chunk_fname(i) if os.path.exists(fn): chunk_sizes.append(os.path.getsize(fn) // self.data_dtype(0).itemsize) else: break return chunk_sizes def get_ready_tokens(self) -> int: return sum(self.get_chunk_sizes()) def __init__(self, unroll_len: int, n_extra: int = 1, split: str = 'train', cache_dir: str = "./cache/", n_tokens: int = 8000, token_limit: Optional[int] = None) -> None: self.split = split self.n_tokens = n_tokens self.unroll_len = unroll_len self.n_extra = n_extra self.update_data_type() self._cache_dir = os.path.join(cache_dir, self.__class__.__name__, self._get_variant_id()) self._chunk_dir = os.path.join(self._cache_dir, "tokenized_chunks", split) self._n_chunks = self.get_n_shards() self.chunk_sizes = [0] * self._n_chunks self.chunk_offsets = [0] * self._n_chunks self.chunk_mmap = [None] * self._n_chunks self.last_available_chunk = -1 self.last_accessed_chunk = -1 self.token_limit = int(math.ceil(token_limit)) if token_limit is not None else None os.makedirs(self._chunk_dir, exist_ok=True) self._sp_model_name = os.path.join(self._cache_dir, "tokenizer.model") with LockFile(self._cache_dir + "/lock"): self.vocabulary = SentencepieceVocabulary(self._sp_model_name, 
GenToIt(self.get_tokenizer_train_sentences), n_tokens) print(f"{self.__class__.__name__}: Loaded tokenizer.") missing = [i for i in range(self._n_chunks) if not os.path.exists(self._chunk_fname(i))] print(f"{self.__class__.__name__}: {len(missing)} chunks missing") if missing: if token_limit is not None: pool = mp.Pool(min(mp.cpu_count(), len(missing))) while True: tokens_ready = self.get_ready_tokens() if tokens_ready >= token_limit: print("Token limit reached. No need to tokenize more.") break print(f"{self.__class__.__name__}: {tokens_ready/token_limit*100:.2f}% ready.") chunks_ready = len(self.get_chunk_sizes()) if chunks_ready == 0: print("Tokenizing first chunk to estimate the number of required chunks...") pool.map(self.tokenize_chunk, [0]) continue elif chunks_ready >= self._n_chunks: print("All chunks ready. No need to tokenize more.") break n_estimated = int(math.ceil(chunks_ready * (token_limit / tokens_ready))) print(f"{self.__class__.__name__}: Tokenizing {n_estimated} estimated chunks...") pool.map(self.tokenize_chunk, [a for a in range(chunks_ready, n_estimated) if a in missing]) print(f"Limiting to {token_limit} tokens") missing = missing[:token_limit // self.unroll_len] del pool else: mp.Pool(min(mp.cpu_count(), len(missing))).map(self.tokenize_chunk, missing) self.chunk_sizes = self.get_chunk_sizes() self.chunk_offsets = self.chunk_offsets[:len(self.chunk_sizes)] self.chunk_mmap = self.chunk_mmap[:len(self.chunk_sizes)] lim_found = False for i in range(1, len(self.chunk_sizes)): self.chunk_offsets[i] = self.chunk_offsets[i - 1] + self.chunk_sizes[i] if self.token_limit is not None and not lim_found and self.chunk_offsets[i] >= self.token_limit: print(f"{self.__class__.__name__}: Limiting to first {i} chunks because limited to {self.token_limit} tokens") lim_found = True def __len__(self): l = self.linear_len() if self.token_limit is not None: l = min(l, self.token_limit) return l // self.unroll_len def linear_len(self): return self.chunk_sizes[-1] + self.chunk_offsets[-1] def get_linear(self, offset: int, clen: int): chunk_index = bisect.bisect(self.chunk_offsets, offset) - 1 chunk_offset = offset - self.chunk_offsets[chunk_index] self.do_mmap(chunk_index) if chunk_offset + clen > self.chunk_sizes[chunk_index]: # Wrapping over chunk boundary next_chunk = (chunk_index + 1) % len(self.chunk_sizes) self.do_mmap(next_chunk) d1 = self.chunk_mmap[chunk_index][chunk_offset:] d2 = self.chunk_mmap[next_chunk][:clen-len(d1)] r = np.concatenate([d1, d2]) else: r = self.chunk_mmap[chunk_index][chunk_offset:chunk_offset+clen] assert r.shape[0] == clen return r def __getitem__(self, item: int) -> Dict[str, Any]: return { "data": self.get_linear(item * self.unroll_len, self.unroll_len + self.n_extra) }
def start_test(self) -> WordLevelLanguageModelTestState:
3
2023-12-13 08:45:02+00:00
8k
Q-Future/Q-Align
q_align/model/modeling_mplug_owl2.py
[ { "identifier": "MPLUGOwl2Config", "path": "q_align/model/configuration_mplug_owl2.py", "snippet": "class MPLUGOwl2Config(LlamaConfig):\n model_type = \"mplug_owl2\"\n def __init__(self, visual_config=None, **kwargs):\n if visual_config is None:\n self.visual_config = DEFAULT_VISUAL_CONFIG\n else:\n self.visual_config = visual_config\n \n super().__init__(\n **kwargs,\n )" }, { "identifier": "MplugOwlVisionConfig", "path": "q_align/model/configuration_mplug_owl2.py", "snippet": "class MplugOwlVisionConfig(PretrainedConfig):\n r\"\"\"\n This is the configuration class to store the configuration of a [`MplugOwlVisionModel`]. It is used to instantiate\n a\n mPLUG-Owl vision encoder according to the specified arguments, defining the model architecture. Instantiating a\n configuration defaults will yield a similar configuration to that of the mPLUG-Owl\n [x-plug/x_plug-llama-7b](https://huggingface.co/x-plug/x_plug-llama-7b) architecture.\n\n Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\n documentation from [`PretrainedConfig`] for more information.\n\n Args:\n hidden_size (`int`, *optional*, defaults to 768):\n Dimensionality of the encoder layers and the pooler layer.\n intermediate_size (`int`, *optional*, defaults to 3072):\n Dimensionality of the \"intermediate\" (i.e., feed-forward) layer in the Transformer encoder.\n num_hidden_layers (`int`, *optional*, defaults to 12):\n Number of hidden layers in the Transformer encoder.\n num_attention_heads (`int`, *optional*, defaults to 12):\n Number of attention heads for each attention layer in the Transformer encoder.\n image_size (`int`, *optional*, defaults to 224):\n The size (resolution) of each image.\n patch_size (`int`, *optional*, defaults to 32):\n The size (resolution) of each patch.\n hidden_act (`str` or `function`, *optional*, defaults to `\"quick_gelu\"`):\n The non-linear activation function (function or string) in the encoder and pooler. 
If string, `\"gelu\"`,\n `\"relu\"`, `\"selu\"` and `\"gelu_new\"` ``\"quick_gelu\"` are supported.\n layer_norm_eps (`float`, *optional*, defaults to 1e-5):\n The epsilon used by the layer normalization layers.\n attention_dropout (`float`, *optional*, defaults to 0.0):\n The dropout ratio for the attention probabilities.\n initializer_range (`float`, *optional*, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n initializer_factor (`float`, *optional*, defaults to 1):\n A factor for initializing all weight matrices (should be kept to 1, used internally for initialization\n testing).\n\n\n ```\"\"\"\n\n model_type = \"mplug_owl_vision_model\"\n\n def __init__(\n self,\n hidden_size=1024,\n intermediate_size=4096,\n projection_dim=768,\n num_hidden_layers=24,\n num_attention_heads=16,\n num_channels=3,\n image_size=448,\n patch_size=14,\n hidden_act=\"quick_gelu\",\n layer_norm_eps=1e-6,\n attention_dropout=0.0,\n initializer_range=0.02,\n initializer_factor=1.0,\n use_flash_attn=False,\n **kwargs,\n ):\n super().__init__(**kwargs)\n self.hidden_size = hidden_size\n self.intermediate_size = intermediate_size\n self.projection_dim = projection_dim\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.num_channels = num_channels\n self.patch_size = patch_size\n self.image_size = image_size\n self.initializer_range = initializer_range\n self.initializer_factor = initializer_factor\n self.attention_dropout = attention_dropout\n self.layer_norm_eps = layer_norm_eps\n self.hidden_act = hidden_act\n self.use_flash_attn = use_flash_attn\n\n @classmethod\n def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> \"PretrainedConfig\":\n config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)\n\n # get the vision config dict if we are loading from MplugOwlConfig\n if config_dict.get(\"model_type\") == \"mplug-owl\":\n config_dict = config_dict[\"vision_config\"]\n\n if \"model_type\" in config_dict and hasattr(cls, \"model_type\") and config_dict[\"model_type\"] != cls.model_type:\n logger.warning(\n f\"You are using a model of type {config_dict['model_type']} to instantiate a model of type \"\n f\"{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.\"\n )\n\n return cls.from_dict(config_dict, **kwargs)" }, { "identifier": "MplugOwlVisualAbstractorConfig", "path": "q_align/model/configuration_mplug_owl2.py", "snippet": "class MplugOwlVisualAbstractorConfig(PretrainedConfig):\n model_type = \"mplug_owl_visual_abstract\"\n\n def __init__(\n self,\n num_learnable_queries=64,\n hidden_size=1024,\n num_hidden_layers=6,\n num_attention_heads=16,\n intermediate_size=2816,\n attention_probs_dropout_prob=0.,\n initializer_range=0.02,\n layer_norm_eps=1e-6,\n encoder_hidden_size=1024,\n grid_size=None,\n **kwargs,\n ):\n super().__init__(**kwargs)\n self.hidden_size = hidden_size\n self.num_learnable_queries = num_learnable_queries\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.intermediate_size = intermediate_size\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.initializer_range = initializer_range\n self.layer_norm_eps = layer_norm_eps\n self.encoder_hidden_size = encoder_hidden_size\n self.grid_size = grid_size if grid_size else 32\n\n @classmethod\n def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> \"PretrainedConfig\":\n config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)\n\n # get the visual_abstractor config dict if we are loading from MplugOwlConfig\n if config_dict.get(\"model_type\") == \"mplug-owl\":\n config_dict = config_dict[\"abstractor_config\"]\n\n if \"model_type\" in config_dict and hasattr(cls, \"model_type\") and config_dict[\"model_type\"] != cls.model_type:\n logger.warning(\n f\"You are using a model of type {config_dict['model_type']} to instantiate a model of type \"\n f\"{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.\"\n )\n\n return cls.from_dict(config_dict, **kwargs)" }, { "identifier": "MplugOwlVisionModel", "path": "q_align/model/visual_encoder.py", "snippet": "class MplugOwlVisionModel(PreTrainedModel):\n main_input_name = \"pixel_values\"\n _no_split_modules = [\"MplugOwlVisionEncoderLayer\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.config = config\n self.hidden_size = config.hidden_size\n\n self.embeddings = MplugOwlVisionEmbeddings(config)\n self.encoder = MplugOwlVisionEncoder(config)\n self.post_layernorm = nn.LayerNorm(self.hidden_size, eps=config.layer_norm_eps)\n\n self.post_init()\n\n\n def forward(\n self,\n pixel_values: Optional[torch.FloatTensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, BaseModelOutputWithPooling]:\n r\"\"\"\n Returns:\n\n \"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if pixel_values is None:\n raise ValueError(\"You have to specify pixel_values\")\n\n hidden_states = self.embeddings(pixel_values)\n\n encoder_outputs = self.encoder(\n inputs_embeds=hidden_states,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n last_hidden_state = encoder_outputs[0]\n last_hidden_state = self.post_layernorm(last_hidden_state)\n\n pooled_output = last_hidden_state[:, 0, :]\n pooled_output = self.post_layernorm(pooled_output)\n\n if not return_dict:\n return (last_hidden_state, pooled_output) + encoder_outputs[1:]\n\n return BaseModelOutputWithPooling(\n last_hidden_state=last_hidden_state,\n pooler_output=pooled_output,\n hidden_states=encoder_outputs.hidden_states,\n attentions=encoder_outputs.attentions,\n )\n\n def get_input_embeddings(self):\n return self.embeddings" }, { "identifier": "MplugOwlVisualAbstractorModel", "path": "q_align/model/visual_encoder.py", "snippet": "class MplugOwlVisualAbstractorModel(PreTrainedModel):\n _no_split_modules = [\"MplugOwlVisualAbstractorLayer\"]\n def __init__(self, config, language_hidden_size):\n super().__init__(config)\n self.config = config\n\n self.encoder = MplugOwlVisualAbstractorEncoder(config)\n self.visual_fc = torch.nn.Linear(config.hidden_size, language_hidden_size)\n self.query_embeds = torch.nn.Parameter(torch.randn(1, config.num_learnable_queries, config.hidden_size))\n self.vit_eos = torch.nn.Parameter(torch.randn(1, 1, language_hidden_size))\n\n self.post_init()\n\n def _prune_heads(self, heads_to_prune):\n \"\"\"\n Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base\n class PreTrainedModel\n \"\"\"\n for layer, heads in heads_to_prune.items():\n self.encoder.layer[layer].attention.prune_heads(heads)\n\n def get_extended_attention_mask(\n self,\n attention_mask: torch.Tensor,\n input_shape: Tuple[int],\n device: torch.device,\n ) -> torch.Tensor:\n \"\"\"\n Makes broadcastable attention and causal masks so that future and masked tokens are ignored.\n\n Arguments:\n attention_mask (`torch.Tensor`):\n Mask with ones indicating tokens to attend to, zeros for tokens to ignore.\n input_shape (`Tuple[int]`):\n The shape of the input to the model.\n device: (`torch.device`):\n The device of the input to the model.\n\n Returns:\n `torch.Tensor` The extended attention mask, with a the same dtype as `attention_mask.dtype`.\n \"\"\"\n # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]\n # ourselves in which case we just need to make it broadcastable to all heads.\n if attention_mask.dim() == 3:\n extended_attention_mask = attention_mask[:, None, :, :]\n elif attention_mask.dim() == 2:\n # Provided a padding mask of dimensions [batch_size, seq_length]\n # - the model is an encoder, so make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]\n extended_attention_mask = attention_mask[:, None, None, :]\n else:\n raise ValueError(\n \"Wrong shape for input_ids (shape {}) or attention_mask (shape {})\".format(\n input_shape, attention_mask.shape\n )\n )\n\n # Since attention_mask is 1.0 for positions we want to attend and 0.0 for\n # masked positions, this operation will create a tensor which is 0.0 for\n # positions we want to attend and -10000.0 for masked positions.\n # Since we are adding it to the raw scores before the softmax, this is\n # effectively the same as removing these entirely.\n extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility\n extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0\n return extended_attention_mask\n\n def forward(\n self,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_values=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n r\"\"\"\n encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, `optional`):\n Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if\n the model is configured as a decoder.\n encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, `optional`):\n Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in\n the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of:\n shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and\n value hidden states of the attention blocks. Can be used to speed up decoding. 
If `past_key_values` are\n used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key\n value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape\n `(batch_size, sequence_length)`.\n \"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n \n query_embeds = self.query_embeds.repeat(encoder_hidden_states.shape[0], 1, 1)\n embedding_output = query_embeds\n input_shape = embedding_output.size()[:-1]\n batch_size, seq_length = input_shape\n device = embedding_output.device\n\n # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]\n # ourselves in which case we just need to make it broadcastable to all heads.\n if attention_mask is None:\n attention_mask = torch.ones(\n (query_embeds.shape[0], query_embeds.shape[1]), dtype=torch.long, device=query_embeds.device\n )\n extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)\n\n # If a 2D or 3D attention mask is provided for the cross-attention\n # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]\n if encoder_hidden_states is not None:\n if type(encoder_hidden_states) == list:\n encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[0].size()\n else:\n (\n encoder_batch_size,\n encoder_sequence_length,\n _,\n ) = encoder_hidden_states.size()\n encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)\n\n if type(encoder_attention_mask) == list:\n encoder_extended_attention_mask = [self.invert_attention_mask(mask) for mask in encoder_attention_mask]\n elif encoder_attention_mask is None:\n encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)\n encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)\n else:\n encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)\n else:\n encoder_extended_attention_mask = None\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicate we keep the head\n # attention_probs has shape bsz x n_heads x N x N\n # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]\n # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]\n head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)\n\n encoder_outputs = self.encoder(\n embedding_output,\n attention_mask=extended_attention_mask,\n head_mask=head_mask,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_extended_attention_mask,\n past_key_values=past_key_values,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n sequence_output = encoder_outputs[0]\n pooled_output = sequence_output[:, 0, :]\n\n sequence_output = self.visual_fc(sequence_output)\n sequence_output = torch.cat([sequence_output, self.vit_eos.repeat(sequence_output.shape[0], 1, 1)], dim=1)\n\n return BaseModelOutputWithPooling(\n last_hidden_state=sequence_output,\n pooler_output=pooled_output,\n hidden_states=encoder_outputs.hidden_states,\n )" }, { "identifier": "replace_llama_modality_adaptive", "path": "q_align/model/modeling_llama2.py", "snippet": 
"def replace_llama_modality_adaptive():\n transformers.models.llama.configuration_llama.LlamaConfig = LlamaConfig\n transformers.models.llama.modeling_llama.LlamaAttention = LlamaAttention\n transformers.models.llama.modeling_llama.LlamaFlashAttention2 = LlamaFlashAttention2\n transformers.models.llama.modeling_llama.LlamaSdpaAttention = LlamaSdpaAttention\n transformers.models.llama.modeling_llama.LlamaDecoderLayer = LlamaDecoderLayer\n transformers.models.llama.modeling_llama.LlamaModel.forward = model_forward\n transformers.models.llama.modeling_llama.LlamaForCausalLM.forward = causal_model_forward" } ]
from abc import ABC, abstractmethod from typing import List, Optional, Tuple, Union from torch.nn import CrossEntropyLoss from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, CLIPImageProcessor, LlamaConfig, LlamaModel, LlamaForCausalLM from transformers.modeling_outputs import CausalLMOutputWithPast from .configuration_mplug_owl2 import MPLUGOwl2Config, MplugOwlVisionConfig, MplugOwlVisualAbstractorConfig from .visual_encoder import MplugOwlVisionModel, MplugOwlVisualAbstractorModel from .modeling_llama2 import replace_llama_modality_adaptive from icecream import ic from PIL import Image from icecream import ic import torch import torch.nn as nn import copy import os import sys
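The get_extended_attention_mask snippet in the context list above describes the standard additive-mask trick: a [batch, seq] padding mask is broadcast to [batch, 1, 1, seq] and converted so that masked positions receive a large negative value before the softmax. A minimal, self-contained sketch of that idea (plain PyTorch, independent of the model classes in this record; the shapes and the -10000 constant follow the snippet):

```python
import torch

def extended_attention_mask_sketch(attention_mask: torch.Tensor, dtype=torch.float32) -> torch.Tensor:
    # attention_mask: (batch, seq) with 1 = attend, 0 = ignore.
    # Broadcast to (batch, 1, 1, seq) so it applies to every head and every query position,
    # then flip it into an additive mask: 0 where attending, -10000 where masked.
    extended = attention_mask[:, None, None, :].to(dtype)
    return (1.0 - extended) * -10000.0

mask = torch.tensor([[1, 1, 1, 0]])  # last token is padding
additive = extended_attention_mask_sketch(mask)
assert additive.shape == (1, 1, 1, 4) and additive[0, 0, 0, -1].item() == -10000.0
```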
7,090
image_features = [x.flatten(0, 1) for x in image_features] else: image_features = self.encode_images(images) new_input_embeds = [] new_modality_indicators = [] new_labels = [] if labels is not None else None cur_image_idx = 0 for batch_idx, cur_input_ids in enumerate(input_ids): if (cur_input_ids == IMAGE_TOKEN_INDEX).sum() == 0: # multimodal LLM, but the current sample is not multimodal # FIXME: this is a hacky fix, for deepspeed zero3 to work half_len = cur_input_ids.shape[0] // 2 cur_image_features = image_features[cur_image_idx] cur_input_embeds_1 = self.get_model().embed_tokens(cur_input_ids[:half_len]) cur_input_embeds_2 = self.get_model().embed_tokens(cur_input_ids[half_len:]) cur_input_embeds = torch.cat([cur_input_embeds_1, cur_image_features[0:0], cur_input_embeds_2], dim=0) new_input_embeds.append(cur_input_embeds) cur_modality_indicators = torch.zeros(len(cur_input_embeds)).long().to(self.device) new_modality_indicators.append(cur_modality_indicators) if labels is not None: new_labels.append(labels[batch_idx]) cur_image_idx += 1 continue image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0] cur_new_input_embeds = [] cur_modality_indicators = [] if labels is not None: cur_labels = labels[batch_idx] cur_new_labels = [] assert cur_labels.shape == cur_input_ids.shape while image_token_indices.numel() > 0: cur_image_features = image_features[cur_image_idx] image_token_start = image_token_indices[0] cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start])) cur_new_input_embeds.append(cur_image_features) # Add modality indicator assert image_token_start == len(cur_input_ids[:image_token_start]) cur_modality_indicators.append(torch.zeros(len(cur_input_ids[:image_token_start])).long()) cur_modality_indicators.append(torch.ones(len(cur_image_features)).long()) if labels is not None: cur_new_labels.append(cur_labels[:image_token_start]) cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype)) cur_labels = cur_labels[image_token_start+1:] cur_image_idx += 1 cur_input_ids = cur_input_ids[image_token_start+1:] image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0] if cur_input_ids.numel() > 0: cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids)) cur_modality_indicators.append(torch.zeros(len(cur_input_ids)).long()) if labels is not None: cur_new_labels.append(cur_labels) cur_new_input_embeds = [x.to(device=self.device) for x in cur_new_input_embeds] cur_new_input_embeds = torch.cat(cur_new_input_embeds, dim=0) new_input_embeds.append(cur_new_input_embeds) # Modality cur_modality_indicators = [x.to(device=self.device) for x in cur_modality_indicators] cur_modality_indicators = torch.cat(cur_modality_indicators, dim=0) new_modality_indicators.append(cur_modality_indicators) if labels is not None: cur_new_labels = torch.cat(cur_new_labels, dim=0) new_labels.append(cur_new_labels) if any(x.shape != new_input_embeds[0].shape for x in new_input_embeds): max_len = max(x.shape[0] for x in new_input_embeds) # Embedding new_input_embeds_align = [] for cur_new_embed in new_input_embeds: cur_new_embed = torch.cat((cur_new_embed, torch.zeros((max_len - cur_new_embed.shape[0], cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)), dim=0) new_input_embeds_align.append(cur_new_embed) new_input_embeds = torch.stack(new_input_embeds_align, dim=0) # Modality new_modality_indicators_align = [] for cur_modality_indicator in 
new_modality_indicators: cur_new_embed = torch.cat((cur_modality_indicator, torch.zeros(max_len - cur_modality_indicator.shape[0], dtype=cur_modality_indicator.dtype, device=cur_modality_indicator.device)), dim=0) new_modality_indicators_align.append(cur_new_embed) new_modality_indicators = torch.stack(new_modality_indicators_align, dim=0) # Label if labels is not None: new_labels_align = [] _new_labels = new_labels for cur_new_label in new_labels: cur_new_label = torch.cat((cur_new_label, torch.full((max_len - cur_new_label.shape[0],), IGNORE_INDEX, dtype=cur_new_label.dtype, device=cur_new_label.device)), dim=0) new_labels_align.append(cur_new_label) new_labels = torch.stack(new_labels_align, dim=0) # Attention Mask if attention_mask is not None: new_attention_mask = [] for cur_attention_mask, cur_new_labels, cur_new_labels_align in zip(attention_mask, _new_labels, new_labels): new_attn_mask_pad_left = torch.full((cur_new_labels.shape[0] - labels.shape[1],), True, dtype=attention_mask.dtype, device=attention_mask.device) new_attn_mask_pad_right = torch.full((cur_new_labels_align.shape[0] - cur_new_labels.shape[0],), False, dtype=attention_mask.dtype, device=attention_mask.device) cur_new_attention_mask = torch.cat((new_attn_mask_pad_left, cur_attention_mask, new_attn_mask_pad_right), dim=0) new_attention_mask.append(cur_new_attention_mask) attention_mask = torch.stack(new_attention_mask, dim=0) assert attention_mask.shape == new_labels.shape else: new_input_embeds = torch.stack(new_input_embeds, dim=0) new_modality_indicators = torch.stack(new_modality_indicators, dim=0) if labels is not None: new_labels = torch.stack(new_labels, dim=0) if attention_mask is not None: new_attn_mask_pad_left = torch.full((attention_mask.shape[0], new_input_embeds.shape[1] - input_ids.shape[1]), True, dtype=attention_mask.dtype, device=attention_mask.device) attention_mask = torch.cat((new_attn_mask_pad_left, attention_mask), dim=1) assert attention_mask.shape == new_input_embeds.shape[:2] return None, new_modality_indicators, attention_mask, past_key_values, new_input_embeds, new_labels class MPLUGOwl2LlamaModel(MPLUGOwl2MetaModel, LlamaModel):
# Copyright 2023 Haotian Liu & Qinghao Ye (Modified from LLaVA) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. dir_path = os.path.dirname(os.path.realpath(__file__)) sys.path.insert(0, dir_path) IGNORE_INDEX = -100 IMAGE_TOKEN_INDEX = -200 DEFAULT_IMAGE_TOKEN = "<|image|>" def tokenizer_image_token(prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, return_tensors=None): prompt_chunks = [tokenizer(chunk).input_ids if len(chunk) > 0 else [] for chunk in prompt.split(DEFAULT_IMAGE_TOKEN)] def insert_separator(X, sep): return [ele for sublist in zip(X, [sep]*len(X)) for ele in sublist][:-1] input_ids = [] offset = 0 if len(prompt_chunks) > 0 and len(prompt_chunks[0]) > 0 and prompt_chunks[0][0] == tokenizer.bos_token_id: offset = 1 input_ids.append(prompt_chunks[0][0]) for x in insert_separator(prompt_chunks, [image_token_index] * (offset + 1)): input_ids.extend(x[offset:]) if return_tensors is not None: if return_tensors == 'pt': return torch.tensor(input_ids, dtype=torch.long) raise ValueError(f'Unsupported tensor type: {return_tensors}') return input_ids def expand2square(pil_img, background_color): width, height = pil_img.size if width == height: return pil_img elif width > height: result = Image.new(pil_img.mode, (width, width), background_color) result.paste(pil_img, (0, (width - height) // 2)) return result else: result = Image.new(pil_img.mode, (height, height), background_color) result.paste(pil_img, ((height - width) // 2, 0)) return result class MPLUGOwl2MetaModel: def __init__(self, config): super(MPLUGOwl2MetaModel, self).__init__(config) self.vision_model = MplugOwlVisionModel( MplugOwlVisionConfig(**config.visual_config["visual_model"]) ) self.visual_abstractor = MplugOwlVisualAbstractorModel( MplugOwlVisualAbstractorConfig(**config.visual_config["visual_abstractor"]), config.hidden_size ) def get_vision_tower(self): vision_model = getattr(self, 'vision_model', None) if type(vision_model) is list: vision_model = vision_model[0] return vision_model def get_visual_abstractor(self): visual_abstractor = getattr(self, 'visual_abstractor', None) if type(visual_abstractor) is list: visual_abstractor = visual_abstractor[0] return visual_abstractor class MPLUGOwl2MetaForCausalLM(ABC): @abstractmethod def get_model(self): pass def encode_images(self, images): image_features = self.get_model().vision_model(images).last_hidden_state image_features = self.get_model().visual_abstractor(encoder_hidden_states=image_features).last_hidden_state return image_features def prepare_inputs_labels_for_multimodal( self, input_ids, attention_mask, past_key_values, labels, images ): if images is None or input_ids.shape[1] == 1: if past_key_values is not None and images is not None and input_ids.shape[1] == 1: attention_mask = torch.ones((attention_mask.shape[0], past_key_values[-1][-1].shape[-2] + 1), dtype=attention_mask.dtype, device=attention_mask.device) multiway_indices = torch.zeros_like(input_ids).long().to(self.device) return input_ids, multiway_indices, attention_mask, past_key_values, None, labels if 
type(images) is list or images.ndim == 5: concat_images = torch.cat([image for image in images], dim=0) image_features = self.encode_images(concat_images) split_sizes = [image.shape[0] for image in images] image_features = torch.split(image_features, split_sizes, dim=0) image_features = [x.flatten(0, 1) for x in image_features] else: image_features = self.encode_images(images) new_input_embeds = [] new_modality_indicators = [] new_labels = [] if labels is not None else None cur_image_idx = 0 for batch_idx, cur_input_ids in enumerate(input_ids): if (cur_input_ids == IMAGE_TOKEN_INDEX).sum() == 0: # multimodal LLM, but the current sample is not multimodal # FIXME: this is a hacky fix, for deepspeed zero3 to work half_len = cur_input_ids.shape[0] // 2 cur_image_features = image_features[cur_image_idx] cur_input_embeds_1 = self.get_model().embed_tokens(cur_input_ids[:half_len]) cur_input_embeds_2 = self.get_model().embed_tokens(cur_input_ids[half_len:]) cur_input_embeds = torch.cat([cur_input_embeds_1, cur_image_features[0:0], cur_input_embeds_2], dim=0) new_input_embeds.append(cur_input_embeds) cur_modality_indicators = torch.zeros(len(cur_input_embeds)).long().to(self.device) new_modality_indicators.append(cur_modality_indicators) if labels is not None: new_labels.append(labels[batch_idx]) cur_image_idx += 1 continue image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0] cur_new_input_embeds = [] cur_modality_indicators = [] if labels is not None: cur_labels = labels[batch_idx] cur_new_labels = [] assert cur_labels.shape == cur_input_ids.shape while image_token_indices.numel() > 0: cur_image_features = image_features[cur_image_idx] image_token_start = image_token_indices[0] cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start])) cur_new_input_embeds.append(cur_image_features) # Add modality indicator assert image_token_start == len(cur_input_ids[:image_token_start]) cur_modality_indicators.append(torch.zeros(len(cur_input_ids[:image_token_start])).long()) cur_modality_indicators.append(torch.ones(len(cur_image_features)).long()) if labels is not None: cur_new_labels.append(cur_labels[:image_token_start]) cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype)) cur_labels = cur_labels[image_token_start+1:] cur_image_idx += 1 cur_input_ids = cur_input_ids[image_token_start+1:] image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0] if cur_input_ids.numel() > 0: cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids)) cur_modality_indicators.append(torch.zeros(len(cur_input_ids)).long()) if labels is not None: cur_new_labels.append(cur_labels) cur_new_input_embeds = [x.to(device=self.device) for x in cur_new_input_embeds] cur_new_input_embeds = torch.cat(cur_new_input_embeds, dim=0) new_input_embeds.append(cur_new_input_embeds) # Modality cur_modality_indicators = [x.to(device=self.device) for x in cur_modality_indicators] cur_modality_indicators = torch.cat(cur_modality_indicators, dim=0) new_modality_indicators.append(cur_modality_indicators) if labels is not None: cur_new_labels = torch.cat(cur_new_labels, dim=0) new_labels.append(cur_new_labels) if any(x.shape != new_input_embeds[0].shape for x in new_input_embeds): max_len = max(x.shape[0] for x in new_input_embeds) # Embedding new_input_embeds_align = [] for cur_new_embed in new_input_embeds: cur_new_embed = torch.cat((cur_new_embed, torch.zeros((max_len - cur_new_embed.shape[0], 
cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)), dim=0) new_input_embeds_align.append(cur_new_embed) new_input_embeds = torch.stack(new_input_embeds_align, dim=0) # Modality new_modality_indicators_align = [] for cur_modality_indicator in new_modality_indicators: cur_new_embed = torch.cat((cur_modality_indicator, torch.zeros(max_len - cur_modality_indicator.shape[0], dtype=cur_modality_indicator.dtype, device=cur_modality_indicator.device)), dim=0) new_modality_indicators_align.append(cur_new_embed) new_modality_indicators = torch.stack(new_modality_indicators_align, dim=0) # Label if labels is not None: new_labels_align = [] _new_labels = new_labels for cur_new_label in new_labels: cur_new_label = torch.cat((cur_new_label, torch.full((max_len - cur_new_label.shape[0],), IGNORE_INDEX, dtype=cur_new_label.dtype, device=cur_new_label.device)), dim=0) new_labels_align.append(cur_new_label) new_labels = torch.stack(new_labels_align, dim=0) # Attention Mask if attention_mask is not None: new_attention_mask = [] for cur_attention_mask, cur_new_labels, cur_new_labels_align in zip(attention_mask, _new_labels, new_labels): new_attn_mask_pad_left = torch.full((cur_new_labels.shape[0] - labels.shape[1],), True, dtype=attention_mask.dtype, device=attention_mask.device) new_attn_mask_pad_right = torch.full((cur_new_labels_align.shape[0] - cur_new_labels.shape[0],), False, dtype=attention_mask.dtype, device=attention_mask.device) cur_new_attention_mask = torch.cat((new_attn_mask_pad_left, cur_attention_mask, new_attn_mask_pad_right), dim=0) new_attention_mask.append(cur_new_attention_mask) attention_mask = torch.stack(new_attention_mask, dim=0) assert attention_mask.shape == new_labels.shape else: new_input_embeds = torch.stack(new_input_embeds, dim=0) new_modality_indicators = torch.stack(new_modality_indicators, dim=0) if labels is not None: new_labels = torch.stack(new_labels, dim=0) if attention_mask is not None: new_attn_mask_pad_left = torch.full((attention_mask.shape[0], new_input_embeds.shape[1] - input_ids.shape[1]), True, dtype=attention_mask.dtype, device=attention_mask.device) attention_mask = torch.cat((new_attn_mask_pad_left, attention_mask), dim=1) assert attention_mask.shape == new_input_embeds.shape[:2] return None, new_modality_indicators, attention_mask, past_key_values, new_input_embeds, new_labels class MPLUGOwl2LlamaModel(MPLUGOwl2MetaModel, LlamaModel):
config_class = MPLUGOwl2Config
0
2023-12-14 03:36:30+00:00
8k
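For orientation, the record above centres on two helpers: tokenizer_image_token, which splits a prompt on the <|image|> placeholder and interleaves the sentinel id IMAGE_TOKEN_INDEX (-200), and prepare_inputs_labels_for_multimodal, which later splices visual features at those sentinel positions. A toy, dependency-free sketch of the splitting step (the word-level tokenizer is a made-up stand-in, and the BOS-offset handling of the real helper is omitted):

```python
IMAGE_TOKEN_INDEX = -200
DEFAULT_IMAGE_TOKEN = "<|image|>"

def toy_tokenize(text):
    # Hypothetical stand-in for the real tokenizer: one fake id per whitespace-separated word.
    return [abs(hash(word)) % 30000 for word in text.split()]

def tokenizer_image_token_sketch(prompt):
    chunks = [toy_tokenize(chunk) if chunk else [] for chunk in prompt.split(DEFAULT_IMAGE_TOKEN)]
    input_ids = []
    for i, chunk in enumerate(chunks):
        if i > 0:
            input_ids.append(IMAGE_TOKEN_INDEX)  # placeholder later replaced by image embeddings
        input_ids.extend(chunk)
    return input_ids

ids = tokenizer_image_token_sketch("USER: <|image|> Describe the photo. ASSISTANT:")
assert ids.count(IMAGE_TOKEN_INDEX) == 1 and all(i >= 0 for i in ids if i != IMAGE_TOKEN_INDEX)
```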
nox-410/tvm.tl
python/tvm/tir/tensor_intrin/cuda.py
[ { "identifier": "register_func", "path": "python/tvm/_ffi/registry.py", "snippet": "def register_func(func_name, f=None, override=False):\n \"\"\"Register global function\n\n Parameters\n ----------\n func_name : str or function\n The function name\n\n f : function, optional\n The function to be registered.\n\n override: boolean optional\n Whether override existing entry.\n\n Returns\n -------\n fregister : function\n Register function if f is not specified.\n\n Examples\n --------\n The following code registers my_packed_func as global function.\n Note that we simply get it back from global function table to invoke\n it from python side. However, we can also invoke the same function\n from C++ backend, or in the compiled TVM code.\n\n .. code-block:: python\n\n targs = (10, 10.0, \"hello\")\n @tvm.register_func\n def my_packed_func(*args):\n assert(tuple(args) == targs)\n return 10\n # Get it out from global function table\n f = tvm.get_global_func(\"my_packed_func\")\n assert isinstance(f, tvm.PackedFunc)\n y = f(*targs)\n assert y == 10\n \"\"\"\n if callable(func_name):\n f = func_name\n func_name = f.__name__\n\n if not isinstance(func_name, str):\n raise ValueError(\"expect string function name\")\n\n ioverride = ctypes.c_int(override)\n\n def register(myf):\n \"\"\"internal register function\"\"\"\n if not isinstance(myf, PackedFuncBase):\n myf = convert_to_tvm_func(myf)\n check_call(_LIB.TVMFuncRegisterGlobal(c_str(func_name), myf.handle, ioverride))\n return myf\n\n if f:\n return register(f)\n return register" }, { "identifier": "convert", "path": "python/tvm/runtime/object_generic.py", "snippet": "def convert(value, span=None):\n \"\"\"Convert value to TVM object or function.\n\n Parameters\n ----------\n value : python value\n\n span : Optional[Span]\n The location of this statement in the source code.\n\n Returns\n -------\n tvm_val : Object or Function\n Converted value in TVM\n\n Note\n ----\n This function is redirected to `convert_to_object` as it is widely used in\n the codebase. 
We can choose one to keep and discard the other one later.\n \"\"\"\n return convert_to_object(value, span=span)" }, { "identifier": "IntImm", "path": "python/tvm/tir/expr.py", "snippet": "class IntImm(ConstExpr):\n \"\"\"Int constant.\n\n Parameters\n ----------\n dtype : str\n The data type\n\n value : int\n The constant value.\n\n span : Optional[Span]\n The location of this itervar in the source code.\n \"\"\"\n\n def __init__(self, dtype, value, span=None):\n self.__init_handle_by_constructor__(\n tvm.ir._ffi_api.IntImm, dtype, value, span # type: ignore\n )\n\n def __hash__(self):\n return self.value\n\n def __int__(self):\n return self.value\n\n def __nonzero__(self):\n return self.value != 0\n\n def __eq__(self, other):\n return _ffi_api._OpEQ(self, other, None) # type: ignore\n\n def __ne__(self, other):\n return _ffi_api._OpNE(self, other, None) # type: ignore\n\n def __bool__(self):\n return self.__nonzero__()" }, { "identifier": "Cast", "path": "python/tvm/tir/expr.py", "snippet": "class Cast(PrimExprWithOp):\n \"\"\"Cast expression.\n\n Parameters\n ----------\n dtype : str\n The data type\n\n value : PrimExpr\n The value of the function.\n\n span : Optional[Span]\n The location of this itervar in the source code.\n \"\"\"\n\n def __init__(self, dtype, value, span=None):\n self.__init_handle_by_constructor__(_ffi_api.Cast, dtype, value, span) # type: ignore" }, { "identifier": "TensorIntrin", "path": "python/tvm/tir/function.py", "snippet": "class TensorIntrin(Object):\n \"\"\"A tensor intrinsic.\n\n Parameters\n ----------\n desc : PrimFunc\n The function to describe the computation.\n\n impl : PrimFunc\n The function of the implementation for the execution.\n \"\"\"\n\n def __init__(self, desc, impl):\n self.__init_handle_by_constructor__(_ffi_api.TensorIntrin, desc, impl)\n\n @staticmethod\n def register(name: str, desc: PrimFunc, impl: PrimFunc, override: bool = False):\n \"\"\"Register a tensor intrinsic with its name.\n\n Parameters\n ----------\n name : str\n The name of the TensorIntrin to register.\n desc : PrimFunc\n The function to describe the computation.\n impl : PrimFunc\n The function of the implementation for the execution.\n override: bool\n Whether override existing intrinsic.\n \"\"\"\n return _ffi_api.TensorIntrinRegister(\n name, TensorIntrin(desc, impl), override\n ) # type: ignore\n\n @staticmethod\n def get(name: str, allow_missing: bool = False) -> Optional[\"TensorIntrin\"]:\n \"\"\"Look up a tensor intrinsic by its name.\n\n Parameters\n ----------\n name : str\n The name of the TensorIntrin to look up.\n\n allow_missing : bool\n Whether to allow missing tensor intrin. If False, raise an error if the tensor intrin\n doesn't exist.\n\n Returns\n -------\n result : Optional[TensorIntrin]\n The TensorIntrin with the specified name, or None if not found.\n \"\"\"\n return _ffi_api.TensorIntrinGet(name, allow_missing) # pylint: type: ignore" } ]
from typing import Dict, Tuple from typing_extensions import Literal from tvm.script import tir as T from tvm.tir.function import PrimFunc from ..._ffi import register_func from ...runtime import convert from .. import Cast, IntImm, TensorIntrin
4,012
C.elem_offset + tx * lift(local_size_out), False, dtype=out_dtype, ) ) T.evaluate( T.ptx_mma( mma_prefix, "row", "col", in_dtype_abbrv, in_dtype_abbrv, out_dtype_abbrv, A.data, A.elem_offset + tx * lift(local_size), B.data, B.elem_offset + tx * lift(local_size) + lift(local_size) // 2, C.data, C.elem_offset + tx * lift(local_size_out) + lift(local_size_out) // 2, False, dtype=out_dtype, ) ) return mma_sync_desc, mma_sync_impl def get_mma_fill_intrin(dtype, local_size): zero = IntImm("int32", 0).astype(dtype) # Assume M = N = 16 index_map = shared_16x16_to_ldmatrix_32x8_layout @T.prim_func def mma_fill_desc(a: T.handle) -> None: C_warp = T.match_buffer(a, [WARP_SIZE, local_size], dtype=dtype, scope="warp") with T.block("root"): T.reads() T.writes(C_warp[0:WARP_SIZE, 0:local_size]) for i0, i1 in T.grid(M_DIM, N_DIM): with T.block("C_warp"): i, j = T.axis.remap("SS", [i0, i1]) thread_id, local_id = T.meta_var(index_map(i, j)) T.reads() T.writes(C_warp[thread_id, local_id]) C_warp[thread_id, local_id] = zero @T.prim_func def mma_fill_impl(a: T.handle) -> None: C_warp = T.match_buffer( a, [WARP_SIZE, local_size], dtype=dtype, scope="warp", offset_factor=1 ) with T.block("root"): T.reads() T.writes(C_warp[0:WARP_SIZE, 0:local_size]) tx = T.env_thread("threadIdx.x") T.launch_thread(tx, WARP_SIZE) T.evaluate(T.mma_fill(local_size, C_warp.data, C_warp.elem_offset, dtype=dtype)) return mma_fill_desc, mma_fill_impl def get_mma_store_intrin(dtype, local_size, scope="global"): # Assume M = N = 16 index_map = shared_16x16_to_ldmatrix_32x8_layout @T.prim_func def mma_store_desc(a: T.handle, c: T.handle) -> None: C_warp = T.match_buffer(a, [WARP_SIZE, local_size], dtype=dtype, scope="warp") C = T.match_buffer(c, [M_DIM, N_DIM], dtype=dtype, scope=scope) with T.block("root"): T.reads(C_warp[0:WARP_SIZE, 0:local_size]) T.writes(C[0:M_DIM, 0:N_DIM]) for i0, i1 in T.grid(M_DIM, N_DIM): with T.block("C_warp"): v0, v1 = T.axis.remap("SS", [i0, i1]) thread_id, local_id = T.meta_var(index_map(v0, v1)) T.reads(C_warp[thread_id, local_id]) T.writes(C[v0, v1]) C[v0, v1] = C_warp[thread_id, local_id] @T.prim_func def mma_store_impl(a: T.handle, c: T.handle) -> None: s0 = T.int32() s1 = T.int32() C_warp = T.match_buffer( a, [WARP_SIZE, local_size], dtype=dtype, scope="warp", offset_factor=1 ) C = T.match_buffer( c, [M_DIM, N_DIM], dtype=dtype, scope=scope, offset_factor=1, strides=[s0, s1] ) with T.block("root"): T.reads(C_warp[0:WARP_SIZE, 0:local_size]) T.writes(C[0:M_DIM, 0:N_DIM]) tx = T.env_thread("threadIdx.x") T.launch_thread(tx, WARP_SIZE) T.evaluate( T.mma_store( M_DIM, N_DIM, C.access_ptr("w"), C_warp.data, C_warp.elem_offset, s0, dtype=dtype, ) ) return mma_store_desc, mma_store_impl LDMATRIX_16x16_A_INTRIN = "mma.ldmatrix_16x16_a"
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=invalid-name,missing-function-docstring """Intrinsics for tensorization on NVIDIA GPU.""" def shared_16x16_to_ldmatrix_32x8_layout(i, j): thread_id = 4 * (i % 8) + (j % 8) // 2 return thread_id, 4 * (j // 8) + (i // 8) * 2 + (j % 2) def shared_16x32_to_ldmatrix_32x16_layout(i, j): thread_id = 4 * (i % 8) + (j % 16) // 4 return thread_id, 8 * (j // 16) + (i // 8) * 4 + j % 4 def shared_32x16_to_ldmatrix_32x16_layout(i, j): thread_id = (i % 16) // 4 + 4 * (j % 8) return thread_id, 8 * (j // 8) + (i // 16) * 4 + i % 4 @register_func("tir.index_map.shared_16x16_to_ldmatrix_32x8_layout") def index_map_shared_16x16_to_ldmatrix_32x8_layout(ind): i, j = ind[0], ind[1] thread_id, local_id = shared_16x16_to_ldmatrix_32x8_layout(i, j) return convert([thread_id, local_id]) lift = convert M_DIM = 16 N_DIM = 16 WARP_SIZE = 32 HALF_WARP = WARP_SIZE // 2 HALF_WARP_expr = lift(HALF_WARP) def get_ldmatrix_intrin(k_dim, dtype, is_b, transposed, shared_scope="shared"): local_size = (M_DIM * k_dim) // WARP_SIZE shared_offset = None index_map = None if transposed: assert is_b, "Transposed A matrix not supported" ldmatrix_col_major = is_b and not transposed if k_dim == 16: assert dtype == "float16" index_map = shared_16x16_to_ldmatrix_32x8_layout if transposed: shared_offset = ( lambda tx, stride: stride * 8 * (tx // HALF_WARP_expr) + stride * (tx % 8) + 8 * ((tx % HALF_WARP_expr) // 8) ) else: shared_offset = lambda tx, stride: stride * (tx % HALF_WARP_expr) + 8 * ( tx // HALF_WARP_expr ) else: assert ( k_dim == 32 and dtype == "int8" ), "Only k_dim == 16 (float16) or k_dim == 32 (int8) supported for now" if ldmatrix_col_major: index_map = shared_32x16_to_ldmatrix_32x16_layout # A dummy offset, ldmatrix cannot be used for int8 + trans case. # We still use the ldmatrix intrinsic, but lower it to a manual loop in the codegen. # Only the stride information is required. 
shared_offset = lambda _, stride: stride elif is_b and transposed: index_map = shared_16x32_to_ldmatrix_32x16_layout shared_offset = ( lambda tx, stride: stride * 8 * (tx // HALF_WARP_expr) + (tx % 8) * stride + 16 * ((tx % HALF_WARP_expr) // 8) ) else: index_map = shared_16x32_to_ldmatrix_32x16_layout shared_offset = lambda tx, stride: stride * (tx % 16) + 16 * (tx // 16) assert index_map and shared_offset if is_b and not transposed: row_dim = k_dim col_dim = M_DIM else: row_dim = M_DIM col_dim = k_dim shmem_shape = (row_dim, col_dim) offset_factor = col_dim @T.prim_func def ldmatrix_desc(warp_handle: T.handle, shared_handle: T.handle) -> None: shared = T.match_buffer( shared_handle, shmem_shape, dtype, align=64, offset_factor=offset_factor, scope=shared_scope, ) warp = T.match_buffer( warp_handle, (WARP_SIZE, local_size), dtype, align=64, offset_factor=offset_factor, scope="warp", ) with T.block("root"): T.reads(shared[0:row_dim, 0:col_dim]) T.writes(warp[0:WARP_SIZE, 0:local_size]) for ax0, ax1 in T.grid(row_dim, col_dim): with T.block("shared_warp"): v0, v1 = T.axis.remap("SS", [ax0, ax1]) T.reads(shared[v0, v1]) thread_id, local_id = T.meta_var(index_map(v0, v1)) T.writes(warp[thread_id, local_id]) warp[thread_id, local_id] = shared[v0, v1] @T.prim_func def ldmatrix_impl(warp_handle: T.handle, shared_handle: T.handle) -> None: s0 = T.int32() s1 = T.int32() shared = T.match_buffer( shared_handle, shmem_shape, dtype, align=64, offset_factor=offset_factor, scope=shared_scope, strides=[s0, s1], ) warp = T.match_buffer( warp_handle, (WARP_SIZE, local_size), dtype, align=64, offset_factor=offset_factor, scope="warp", ) with T.block("root"): T.reads(shared[0:row_dim, 0:col_dim]) T.writes(warp[0:WARP_SIZE, 0:local_size]) tx = T.env_thread("threadIdx.x") T.launch_thread(tx, WARP_SIZE) T.evaluate( T.ptx_ldmatrix( ldmatrix_col_major, 4, # Always load 4 matrices ".b16", warp.data, warp.elem_offset + lift(local_size) * tx, shared.access_ptr("r"), shared_offset(tx, s0), dtype=dtype, ) ) return ldmatrix_desc, ldmatrix_impl def get_mma_intrin(k_dim, out_dtype, b_transposed): local_size = (M_DIM * k_dim) // WARP_SIZE local_size_out = (M_DIM * N_DIM) // 32 index_map_C = shared_16x16_to_ldmatrix_32x8_layout if k_dim == 16: index_map_A = shared_16x16_to_ldmatrix_32x8_layout index_map_B = shared_16x16_to_ldmatrix_32x8_layout mma_prefix = "m16n8k16" elif k_dim == 32 and b_transposed: index_map_A = index_map_B = shared_16x32_to_ldmatrix_32x16_layout mma_prefix = "m16n8k32" elif k_dim == 32 and not b_transposed: index_map_A = shared_16x32_to_ldmatrix_32x16_layout index_map_B = shared_32x16_to_ldmatrix_32x16_layout mma_prefix = "m16n8k32" else: assert False out_dtype_abbrv = {"float16": "fp16", "float32": "fp32", "int32": "int32"}[out_dtype] if out_dtype in ["float16", "float32"]: in_dtype = "float16" in_dtype_abbrv = "fp16" else: in_dtype = "int8" in_dtype_abbrv = "int8" def maybe_cast(v): if out_dtype in ["float32", "int32"]: return Cast(out_dtype, v) return v def maybe_swap(i, j): if b_transposed: return j, i return i, j A_offset_factor = k_dim B_offset_factor = maybe_swap(k_dim, N_DIM)[-1] out_offset_factor = N_DIM @T.prim_func def mma_sync_desc(a: T.handle, b: T.handle, c: T.handle) -> None: A = T.match_buffer( a, (WARP_SIZE, local_size), in_dtype, align=64, offset_factor=A_offset_factor, scope="warp", ) B = T.match_buffer( b, (WARP_SIZE, local_size), in_dtype, align=64, offset_factor=B_offset_factor, scope="warp", ) C = T.match_buffer( c, (WARP_SIZE, local_size_out), out_dtype, align=64, 
offset_factor=out_offset_factor, scope="warp", ) with T.block("root"): T.reads( C[0:WARP_SIZE, 0:local_size_out], A[0:WARP_SIZE, 0:local_size], B[0:WARP_SIZE, 0:local_size], ) T.writes(C[0:WARP_SIZE, 0:local_size_out]) for i, j, k in T.grid(M_DIM, N_DIM, k_dim): with T.block("C"): i, j, k = T.axis.remap("SSR", [i, j, k]) b_row_ind, b_col_ind = T.meta_var(maybe_swap(k, j)) thread_id_C, local_id_C = T.meta_var(index_map_C(i, j)) thread_id_A, local_id_A = T.meta_var(index_map_A(i, k)) thread_id_B, local_id_B = T.meta_var(index_map_B(b_row_ind, b_col_ind)) T.reads( C[thread_id_C, local_id_C], A[thread_id_A, local_id_A], B[thread_id_B, local_id_B], ) T.writes(C[thread_id_C, local_id_C]) C[thread_id_C, local_id_C] += maybe_cast( A[thread_id_A, local_id_A] ) * maybe_cast(B[thread_id_B, local_id_B]) @T.prim_func def mma_sync_impl(a: T.handle, b: T.handle, c: T.handle) -> None: A = T.match_buffer( a, (WARP_SIZE, local_size), in_dtype, align=64, offset_factor=A_offset_factor, scope="warp", ) B = T.match_buffer( b, (WARP_SIZE, local_size), in_dtype, align=64, offset_factor=B_offset_factor, scope="warp", ) C = T.match_buffer( c, (WARP_SIZE, local_size_out), out_dtype, align=64, offset_factor=out_offset_factor, scope="warp", ) with T.block("root"): T.reads( C[0:WARP_SIZE, 0:local_size_out], A[0:WARP_SIZE, 0:local_size], B[0:WARP_SIZE, 0:local_size], ) T.writes(C[0:WARP_SIZE, 0:local_size_out]) tx = T.env_thread("threadIdx.x") T.launch_thread(tx, WARP_SIZE) T.evaluate( T.ptx_mma( mma_prefix, "row", "col", in_dtype_abbrv, in_dtype_abbrv, out_dtype_abbrv, A.data, A.elem_offset + tx * lift(local_size), B.data, B.elem_offset + tx * lift(local_size), C.data, C.elem_offset + tx * lift(local_size_out), False, dtype=out_dtype, ) ) T.evaluate( T.ptx_mma( mma_prefix, "row", "col", in_dtype_abbrv, in_dtype_abbrv, out_dtype_abbrv, A.data, A.elem_offset + tx * lift(local_size), B.data, B.elem_offset + tx * lift(local_size) + lift(local_size) // 2, C.data, C.elem_offset + tx * lift(local_size_out) + lift(local_size_out) // 2, False, dtype=out_dtype, ) ) return mma_sync_desc, mma_sync_impl def get_mma_fill_intrin(dtype, local_size): zero = IntImm("int32", 0).astype(dtype) # Assume M = N = 16 index_map = shared_16x16_to_ldmatrix_32x8_layout @T.prim_func def mma_fill_desc(a: T.handle) -> None: C_warp = T.match_buffer(a, [WARP_SIZE, local_size], dtype=dtype, scope="warp") with T.block("root"): T.reads() T.writes(C_warp[0:WARP_SIZE, 0:local_size]) for i0, i1 in T.grid(M_DIM, N_DIM): with T.block("C_warp"): i, j = T.axis.remap("SS", [i0, i1]) thread_id, local_id = T.meta_var(index_map(i, j)) T.reads() T.writes(C_warp[thread_id, local_id]) C_warp[thread_id, local_id] = zero @T.prim_func def mma_fill_impl(a: T.handle) -> None: C_warp = T.match_buffer( a, [WARP_SIZE, local_size], dtype=dtype, scope="warp", offset_factor=1 ) with T.block("root"): T.reads() T.writes(C_warp[0:WARP_SIZE, 0:local_size]) tx = T.env_thread("threadIdx.x") T.launch_thread(tx, WARP_SIZE) T.evaluate(T.mma_fill(local_size, C_warp.data, C_warp.elem_offset, dtype=dtype)) return mma_fill_desc, mma_fill_impl def get_mma_store_intrin(dtype, local_size, scope="global"): # Assume M = N = 16 index_map = shared_16x16_to_ldmatrix_32x8_layout @T.prim_func def mma_store_desc(a: T.handle, c: T.handle) -> None: C_warp = T.match_buffer(a, [WARP_SIZE, local_size], dtype=dtype, scope="warp") C = T.match_buffer(c, [M_DIM, N_DIM], dtype=dtype, scope=scope) with T.block("root"): T.reads(C_warp[0:WARP_SIZE, 0:local_size]) T.writes(C[0:M_DIM, 0:N_DIM]) for i0, i1 in 
T.grid(M_DIM, N_DIM): with T.block("C_warp"): v0, v1 = T.axis.remap("SS", [i0, i1]) thread_id, local_id = T.meta_var(index_map(v0, v1)) T.reads(C_warp[thread_id, local_id]) T.writes(C[v0, v1]) C[v0, v1] = C_warp[thread_id, local_id] @T.prim_func def mma_store_impl(a: T.handle, c: T.handle) -> None: s0 = T.int32() s1 = T.int32() C_warp = T.match_buffer( a, [WARP_SIZE, local_size], dtype=dtype, scope="warp", offset_factor=1 ) C = T.match_buffer( c, [M_DIM, N_DIM], dtype=dtype, scope=scope, offset_factor=1, strides=[s0, s1] ) with T.block("root"): T.reads(C_warp[0:WARP_SIZE, 0:local_size]) T.writes(C[0:M_DIM, 0:N_DIM]) tx = T.env_thread("threadIdx.x") T.launch_thread(tx, WARP_SIZE) T.evaluate( T.mma_store( M_DIM, N_DIM, C.access_ptr("w"), C_warp.data, C_warp.elem_offset, s0, dtype=dtype, ) ) return mma_store_desc, mma_store_impl LDMATRIX_16x16_A_INTRIN = "mma.ldmatrix_16x16_a"
TensorIntrin.register(LDMATRIX_16x16_A_INTRIN, *get_ldmatrix_intrin(16, "float16", False, False))
4
2023-12-14 02:37:47+00:00
8k
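The layout helpers in the record above encode how a 16x16 tile is distributed across a 32-thread warp for the ldmatrix/mma intrinsics: shared_16x16_to_ldmatrix_32x8_layout(i, j) returns a (thread_id, local_id) pair with 8 values held per thread. A standalone check (pure Python, no TVM required) that the formula copied from the record is a bijection onto the 32x8 register file:

```python
def shared_16x16_to_ldmatrix_32x8_layout(i, j):
    # Formula copied verbatim from the record above.
    thread_id = 4 * (i % 8) + (j % 8) // 2
    return thread_id, 4 * (j // 8) + (i // 8) * 2 + (j % 2)

seen = set()
for i in range(16):
    for j in range(16):
        thread_id, local_id = shared_16x16_to_ldmatrix_32x8_layout(i, j)
        assert 0 <= thread_id < 32 and 0 <= local_id < 8
        seen.add((thread_id, local_id))

# 256 tile elements land on 32 threads x 8 register slots with no collisions.
assert len(seen) == 32 * 8
```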
berlino/gated_linear_attention
kernels/inter_chunk_contribution/fn.py
[ { "identifier": "PreprocessCumSum_GK", "path": "kernels/inter_chunk_contribution/preprocess_cumsum_gk.py", "snippet": "class PreprocessCumSum_GK(torch.autograd.Function):\n @staticmethod\n def forward(ctx, q, k, gk, normalizer_gk=8, clamp_min=-3):\n q = q.contiguous()\n k = k.contiguous()\n gk = gk.contiguous()\n \n B, H, NUM_CHUNK, CHUNK_SIZE, D = q.shape\n\n D_k = k.shape[-1]\n # D_v = v.shape[-1]\n \n \n # (B, H, L, D_K, D_V)\n # , memory_format=torch.contiguous_format)\n # o = torch.empty_like(v).contiguous()\n # share memory's limit.\n # BLOCK_MODEL_K = 128\n # BLOCK_MODEL_V = 128\n #split k\n\n grid = (B * H, NUM_CHUNK)\n ctx.grid = grid \n\n k_reduce = torch.empty_like(k)\n\n q_exp = torch.empty_like(q)\n\n gk_cumsum = torch.empty_like(gk)\n\n gk_last_exp = torch.empty_like(gk[:, :, :, 0], dtype=torch.float32)\n\n _fwd_preprocess_cumsum_gk[grid](\n q, k, gk, gk_cumsum, \n q_exp, k_reduce, gk_last_exp, \n CHUNK_SIZE=CHUNK_SIZE, NUM_CHUNK = NUM_CHUNK, L = CHUNK_SIZE * NUM_CHUNK, normalizer=normalizer_gk, clamp_min=clamp_min,\n D_MODEL_K=D_k, num_warps=8 if D_k >= 512 else 4\n )\n \n\n ctx.grid = grid \n ctx.save_for_backward(q, k, gk, gk_cumsum)\n ctx.normalizer_gk = normalizer_gk\n ctx.clamp_min = clamp_min\n\n return gk_cumsum, k_reduce, q_exp, gk_last_exp\n\n @staticmethod\n def backward(ctx, dgk_cumsum, dk_reduce, dq_exp, dgk_last_exp):\n\n dgk_cumsum = dgk_cumsum.contiguous()\n dk_reduce = dk_reduce.contiguous()\n dq_exp = dq_exp.contiguous()\n dgk_last_exp = dgk_last_exp.contiguous()\n\n q, k, gk, gk_cumsum = ctx.saved_tensors\n grid = ctx.grid\n\n dq = torch.empty_like(q)\n dk = torch.empty_like(k)\n dgk = torch.empty_like(gk)\n\n B, H, NUM_CHUNK, CHUNK_SIZE, D_k = q.shape\n\n\n # D_v = v.shape[-1] \n\n _bwd_preprocess_cumsum_gk[grid](\n q, k, gk, gk_cumsum, \n dq_exp, dk_reduce, dgk_last_exp, dgk_cumsum,\n dq, dk, dgk,\n CHUNK_SIZE=CHUNK_SIZE, NUM_CHUNK = NUM_CHUNK, L = CHUNK_SIZE * NUM_CHUNK, normalizer=ctx.normalizer_gk, clamp_min = ctx.clamp_min,\n D_MODEL_K=D_k, num_warps=8 if D_k >= 512 else 4\n )\n\n return dq, dk, dgk, None, None, None" }, { "identifier": "PreprocessCumSum_GV", "path": "kernels/inter_chunk_contribution/preprocess_cumsum_gv.py", "snippet": "class PreprocessCumSum_GV(torch.autograd.Function):\n @staticmethod\n def forward(ctx, v, gv, normalizer_gv=8, clamp_min=-3):\n v = v.contiguous()\n gv = gv.contiguous()\n \n B, H, NUM_CHUNK, CHUNK_SIZE, D_v = v.shape\n\n\n # D_k = k.shape[-1]\n # D_v = v.shape[-1]\n \n # (B, H, L, D_K, D_V)\n # , memory_format=torch.contiguous_format)\n # o = torch.empty_like(v).contiguous()\n # share memory's limit.\n # BLOCK_MODEL_K = 128\n # BLOCK_MODEL_V = 128\n #split k\n\n grid = (B * H, NUM_CHUNK)\n ctx.grid = grid \n\n\n gv_cumsum = torch.empty_like(gv, dtype=torch.float32) \n gv_cumsum_exp = torch.empty_like(gv)\n v_reduce = torch.empty_like(v)\n gv_last_exp = torch.empty_like(gv[:, :, :, 0], dtype=torch.float32)\n _fwd_preprocess_cumsum_gv[grid](\n v, gv, gv_cumsum, gv_cumsum_exp, \n v_reduce, gv_last_exp, \n CHUNK_SIZE=CHUNK_SIZE, NUM_CHUNK = NUM_CHUNK, L = CHUNK_SIZE * NUM_CHUNK, normalizer=normalizer_gv, clamp_min=clamp_min,\n D_MODEL_V=D_v, num_warps=8 if D_v >= 512 else 4\n ) \n \n ctx.grid = grid \n ctx.save_for_backward(v, gv, gv_cumsum)\n ctx.normalizer_gv = normalizer_gv\n ctx.clamp_min = clamp_min\n\n return gv_cumsum, v_reduce, gv_cumsum_exp, gv_last_exp\n\n\n\n @staticmethod\n def backward(ctx, dgv_cumsum, dv_reduce, dgv_cumsum_exp, dgv_last_exp):\n\n dgv_cumsum = dgv_cumsum.contiguous()\n dv_reduce = 
dv_reduce.contiguous()\n dgv_cumsum_exp = dgv_cumsum_exp.contiguous()\n dgv_last_exp = dgv_last_exp.contiguous()\n v, gv, gv_cumsum = ctx.saved_tensors\n grid = ctx.grid\n\n B, H, NUM_CHUNK, CHUNK_SIZE, D_v = v.shape\n\n dv = torch.empty_like(v)\n dgv = torch.empty_like(gv) \n _bwd_preprocess_cumsum_gv[grid](\n v, gv, gv_cumsum, dgv_cumsum_exp, dv_reduce, dgv_last_exp, dgv_cumsum, \n dv, dgv, \n CHUNK_SIZE=CHUNK_SIZE, NUM_CHUNK = NUM_CHUNK, L = CHUNK_SIZE * NUM_CHUNK, normalizer=ctx.normalizer_gv, clamp_min = ctx.clamp_min,\n D_MODEL_V=D_v, num_warps=8 if D_v >= 512 else 4 \n ) \n return dv, dgv, None, None, None" }, { "identifier": "Chunk_memory_update_full", "path": "kernels/inter_chunk_contribution/chunk_scan_triton_full.py", "snippet": "class Chunk_memory_update_full(torch.autograd.Function):\n @staticmethod\n def forward(ctx, decay_key_last, decay_value_last, to_add):\n decay_key_last = decay_key_last.contiguous()\n decay_value_last = decay_value_last.contiguous()\n to_add = to_add.contiguous()\n\n B, H, N, D_k, D_v = to_add.shape \n output = torch.empty_like(to_add) \n BLOCK_MODEL = 32\n \n assert D_k % 32 == 0\n assert D_v % 32 == 0\n assert D_k == decay_key_last.shape[-1]\n assert D_v == decay_value_last.shape[-1]\n\n grid = (B*H, D_k//BLOCK_MODEL, D_v//BLOCK_MODEL)\n ctx.grid = grid \n ctx.BLOCK_MODEL = BLOCK_MODEL\n\n _fwd_recurrence[grid](\n to_add, \n decay_key_last,\n decay_value_last,\n output,\n D_MODEL_K=D_k, D_MODEL_V=D_v,\n NUM_BLOCK=N, \n BLOCK_MODEL=BLOCK_MODEL\n )\n \n\n output[:, :, 0] = 0\n ctx.save_for_backward(output, decay_key_last, decay_value_last) \n \n return output\n\n @staticmethod\n def backward(ctx, DO):\n DO = DO.contiguous()\n\n output, decay_key_last, decay_value_last = ctx.saved_tensors \n\n B, H, N, D_k, D_v = output.shape \n\n num_block = N\n \n BLOCK_MODEL = 32 \n\n grid = (B*H, D_k//BLOCK_MODEL, D_v//BLOCK_MODEL)\n\n # I don't want atomic_add to be used in the backward pass\n # so I add another dimension to the output tensor (D_k/v // BLOCK_MODEL)\n # afterward, I sum over this dimension to get the correct gradient \n D_p1 = torch.empty(B, H, N, D_v // BLOCK_MODEL, D_k, device=DO.device, dtype=torch.float32)\n D_p2 = torch.empty(B, H, N, D_k // BLOCK_MODEL, D_v, device=DO.device, dtype=torch.float32)\n\n _bwd_recurrence[grid](\n output, decay_key_last, decay_value_last,\n DO, D_p1, D_p2, \n NUM_BLOCK = num_block, NUM_SPLIT_K = D_k // BLOCK_MODEL, NUM_SPLIT_V = D_v // BLOCK_MODEL, \n D_MODEL_K = D_k,\n D_MODEL_V = D_v, \n BLOCK_MODEL = BLOCK_MODEL\n )\n\n output[:, :, -1] = 0\n D_p1[:, :, 0] = 0\n D_p1[:, :, -1] = 0\n D_p2[:, :, 0] = 0\n D_p2[:, :, -1] = 0\n \n return D_p1.sum(-2), D_p2.sum(-2), output " }, { "identifier": "Chunk_memory_update_only_gk", "path": "kernels/inter_chunk_contribution/chunk_scan_triton_only_gk.py", "snippet": "class Chunk_memory_update_only_gk(torch.autograd.Function):\n @staticmethod\n def forward(ctx, decay_key_last, to_add):\n decay_key_last = decay_key_last.contiguous()\n to_add = to_add.contiguous()\n\n B, H, N, D_k, D_v = to_add.shape \n output = torch.empty_like(to_add) \n BLOCK_MODEL = 32\n \n assert D_k % 32 == 0\n assert D_v % 32 == 0\n assert D_k == decay_key_last.shape[-1]\n # assert D_v == to_add.shape[-1]\n\n grid = (B*H, D_k//BLOCK_MODEL, D_v//BLOCK_MODEL)\n ctx.grid = grid \n ctx.BLOCK_MODEL = BLOCK_MODEL\n\n _fwd_recurrence[grid](\n to_add, \n decay_key_last,\n output,\n D_MODEL_K=D_k, D_MODEL_V=D_v,\n NUM_BLOCK=N, \n BLOCK_MODEL=BLOCK_MODEL\n )\n \n\n output[:, :, 0] = 0\n ctx.save_for_backward(output, 
decay_key_last) \n \n return output\n\n @staticmethod\n def backward(ctx, DO):\n DO = DO.contiguous()\n\n output, decay_key_last = ctx.saved_tensors \n\n B, H, N, D_k, D_v = output.shape \n\n num_block = N\n \n BLOCK_MODEL = 32 \n\n grid = (B*H, D_k//BLOCK_MODEL, D_v//BLOCK_MODEL)\n\n # I don't want atomic_add to be used in the backward pass\n # so I add another dimension to the output tensor (D_k/v // BLOCK_MODEL)\n # afterward, I sum over this dimension to get the correct gradient \n D_p1 = torch.empty(B, H, N, D_v // BLOCK_MODEL, D_k, device=DO.device, dtype=torch.float32)\n # D_p2 = torch.empty(B, H, N, D_k // BLOCK_MODEL, D_v, device=DO.device, dtype=torch.float32)\n\n _bwd_recurrence[grid](\n output, decay_key_last, \n DO, D_p1, \n NUM_BLOCK = num_block, NUM_SPLIT_K = D_k // BLOCK_MODEL, NUM_SPLIT_V = D_v // BLOCK_MODEL, \n D_MODEL_K = D_k,\n D_MODEL_V = D_v, \n BLOCK_MODEL = BLOCK_MODEL\n )\n\n output[:, :, -1] = 0\n D_p1[:, :, 0] = 0\n D_p1[:, :, -1] = 0\n \n return D_p1.sum(-2), output " }, { "identifier": "Chunk_memory_update_only_gv", "path": "kernels/inter_chunk_contribution/chunk_scan_triton_only_gv.py", "snippet": "class Chunk_memory_update_only_gv(torch.autograd.Function):\n @staticmethod\n def forward(ctx, decay_value_last, to_add):\n decay_value_last = decay_value_last.contiguous()\n to_add = to_add.contiguous()\n\n B, H, N, D_k, D_v = to_add.shape \n output = torch.empty_like(to_add) \n BLOCK_MODEL = 32\n \n assert D_k % 32 == 0\n assert D_v % 32 == 0\n # assert D_k == decay_key_last.shape[-1]\n assert D_v == decay_value_last.shape[-1]\n\n grid = (B*H, D_k//BLOCK_MODEL, D_v//BLOCK_MODEL)\n ctx.grid = grid \n ctx.BLOCK_MODEL = BLOCK_MODEL\n\n _fwd_recurrence[grid](\n to_add, \n decay_value_last,\n output,\n D_MODEL_K=D_k, D_MODEL_V=D_v,\n NUM_BLOCK=N, \n BLOCK_MODEL=BLOCK_MODEL\n )\n \n\n output[:, :, 0] = 0\n ctx.save_for_backward(output, decay_value_last) \n \n return output\n\n @staticmethod\n def backward(ctx, DO):\n DO = DO.contiguous()\n\n output, decay_value_last = ctx.saved_tensors \n\n B, H, N, D_k, D_v = output.shape \n\n num_block = N\n \n BLOCK_MODEL = 32 \n\n grid = (B*H, D_k//BLOCK_MODEL, D_v//BLOCK_MODEL)\n\n # I don't want atomic_add to be used in the backward pass\n # so I add another dimension to the output tensor (D_k/v // BLOCK_MODEL)\n # afterward, I sum over this dimension to get the correct gradient \n D_p2 = torch.empty(B, H, N, D_k // BLOCK_MODEL, D_v, device=DO.device, dtype=torch.float32)\n\n _bwd_recurrence[grid](\n output, decay_value_last,\n DO, D_p2, \n NUM_BLOCK = num_block, NUM_SPLIT_K = D_k // BLOCK_MODEL, NUM_SPLIT_V = D_v // BLOCK_MODEL, \n D_MODEL_K = D_k,\n D_MODEL_V = D_v, \n BLOCK_MODEL = BLOCK_MODEL\n )\n\n output[:, :, -1] = 0\n # D_p1[:, :, 0] = 0\n # D_p1[:, :, -1] = 0\n D_p2[:, :, 0] = 0\n D_p2[:, :, -1] = 0\n \n return D_p2.sum(-2), output " }, { "identifier": "Chunk_memory_update_no_decay", "path": "kernels/inter_chunk_contribution/chunk_scan_triton_no_decay.py", "snippet": "class Chunk_memory_update_no_decay(torch.autograd.Function):\n @staticmethod\n def forward(ctx, to_add):\n # decay_key_last = decay_key_last.contiguous()\n # decay_value_last = decay_value_last.contiguous()\n to_add = to_add.contiguous()\n\n B, H, N, D_k, D_v = to_add.shape \n output = torch.empty_like(to_add) \n BLOCK_MODEL = 32\n \n assert D_k % 32 == 0\n assert D_v % 32 == 0\n # assert D_k == decay_key_last.shape[-1]\n # assert D_v == decay_value_last.shape[-1]\n\n grid = (B*H, D_k//BLOCK_MODEL, D_v//BLOCK_MODEL)\n ctx.grid = grid \n ctx.BLOCK_MODEL = 
BLOCK_MODEL\n\n _fwd_recurrence[grid](\n to_add, \n # decay_key_last,\n # decay_value_last,\n output,\n D_MODEL_K=D_k, D_MODEL_V=D_v,\n NUM_BLOCK=N, \n BLOCK_MODEL=BLOCK_MODEL\n )\n\n output[:, :, 0] = 0\n ctx.save_for_backward(output) \n \n return output\n\n\n @staticmethod\n def backward(ctx, DO):\n DO = DO.contiguous()\n\n output, = ctx.saved_tensors \n\n B, H, N, D_k, D_v = output.shape \n\n num_block = N\n \n BLOCK_MODEL = 32 \n\n grid = (B*H, D_k//BLOCK_MODEL, D_v//BLOCK_MODEL)\n\n # I don't want atomic_add to be used in the backward pass\n # so I add another dimension to the output tensor (D_k/v // BLOCK_MODEL)\n # afterward, I sum over this dimension to get the correct gradient \n # D_p1 = torch.empty(B, H, N, D_v // BLOCK_MODEL, D_k, device=DO.device, dtype=torch.float32)\n # D_p2 = torch.empty(B, H, N, D_k // BLOCK_MODEL, D_v, device=DO.device, dtype=torch.float32)\n\n _bwd_recurrence[grid](\n output, \n DO, \n NUM_BLOCK = num_block, NUM_SPLIT_K = D_k // BLOCK_MODEL, NUM_SPLIT_V = D_v // BLOCK_MODEL, \n D_MODEL_K = D_k,\n D_MODEL_V = D_v, \n BLOCK_MODEL = BLOCK_MODEL\n )\n\n output[:, :, -1] = 0\n \n return output " } ]
from .preprocess_cumsum_gk import PreprocessCumSum_GK from .preprocess_cumsum_gv import PreprocessCumSum_GV from .chunk_scan_triton_full import Chunk_memory_update_full from .chunk_scan_triton_only_gk import Chunk_memory_update_only_gk from .chunk_scan_triton_only_gv import Chunk_memory_update_only_gv from .chunk_scan_triton_no_decay import Chunk_memory_update_no_decay
4,662
def inter_chunk_onc(query, key, value, gk, gv, normalizer_gk=16, normalizer_gv=16, clam_min=-3): if gk is not None: g_key_cumsum, reduce_key, q_exp, g_key_last_exp = PreprocessCumSum_GK.apply(query, key, gk, normalizer_gk, clam_min) else: reduce_key = key q_exp = None g_key_cumsum = None g_key_last_exp = None # gv_cumsum, v_reduce, gv_cumsum_exp, gv_last_exp if gv is not None: g_value_cumsum, reduce_value, g_value_cumsum_exp, g_value_last_exp = PreprocessCumSum_GV.apply( value, gv, normalizer_gv, clam_min) else: reduce_value = value g_value_cumsum = None g_value_last_exp = None to_add = reduce_key.transpose(-1, -2) @ reduce_value if gk is not None and gv is not None: memory_cache = Chunk_memory_update_full.apply(g_key_last_exp, g_value_last_exp, to_add) inter_chunk_contribution = ((q_exp) @ memory_cache) * g_value_cumsum_exp elif gk is None and gv is not None: memory_cache = Chunk_memory_update_only_gv.apply(g_value_last_exp, to_add) inter_chunk_contribution = ((query) @ memory_cache) * g_value_cumsum_exp elif gk is not None and gv is None:
def inter_chunk_onc(query, key, value, gk, gv, normalizer_gk=16, normalizer_gv=16, clam_min=-3): if gk is not None: g_key_cumsum, reduce_key, q_exp, g_key_last_exp = PreprocessCumSum_GK.apply(query, key, gk, normalizer_gk, clam_min) else: reduce_key = key q_exp = None g_key_cumsum = None g_key_last_exp = None # gv_cumsum, v_reduce, gv_cumsum_exp, gv_last_exp if gv is not None: g_value_cumsum, reduce_value, g_value_cumsum_exp, g_value_last_exp = PreprocessCumSum_GV.apply( value, gv, normalizer_gv, clam_min) else: reduce_value = value g_value_cumsum = None g_value_last_exp = None to_add = reduce_key.transpose(-1, -2) @ reduce_value if gk is not None and gv is not None: memory_cache = Chunk_memory_update_full.apply(g_key_last_exp, g_value_last_exp, to_add) inter_chunk_contribution = ((q_exp) @ memory_cache) * g_value_cumsum_exp elif gk is None and gv is not None: memory_cache = Chunk_memory_update_only_gv.apply(g_value_last_exp, to_add) inter_chunk_contribution = ((query) @ memory_cache) * g_value_cumsum_exp elif gk is not None and gv is None:
memory_cache = Chunk_memory_update_only_gk.apply(g_key_last_exp, to_add)
3
2023-12-11 18:13:44+00:00
8k
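The record above revolves around chunked (inter-chunk) recurrence kernels for gated linear attention, where each chunk's memory is accumulated from the previous chunks' key-value outer products. As a rough reference for what the no-decay variant of `Chunk_memory_update_*` computes, here is a minimal pure-PyTorch sketch (not the repository's Triton implementation); shapes `(B, H, N, D_k, D_v)` follow the record, and the zeroed first chunk mirrors the kernel convention `output[:, :, 0] = 0`.

```python
import torch

def chunk_memory_no_decay_reference(to_add: torch.Tensor) -> torch.Tensor:
    # to_add: (B, H, N, D_k, D_v), the per-chunk key^T @ value contributions.
    # output[:, :, i] = sum over chunks j < i of to_add[:, :, j]
    # (an exclusive prefix sum over the chunk axis), so output[:, :, 0] == 0.
    return torch.cumsum(to_add, dim=2) - to_add

x = torch.randn(2, 4, 8, 32, 32)
out = chunk_memory_no_decay_reference(x)
assert out.shape == x.shape
assert out[:, :, 0].abs().max() == 0
```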
kakaobrain/honeybee
serve/web_server.py
[ { "identifier": "default_conversation", "path": "serve/conversation.py", "snippet": "class SeparatorStyle(Enum):\nclass Conversation:\n SINGLE = auto()\n TWO = auto()\n W, H = image.size\n H, W = longest_edge, shortest_edge\n H, W = shortest_edge, longest_edge\n def get_prompt(self):\n def append_message(self, role, message):\n def get_index(self, num_frames, num_segments):\n def load_video(self, path, num_frames=4):\n def get_images(self, log_dir=None):\n def to_gradio_chatbot(self):\n def copy(self):\n def dict(self):" }, { "identifier": "code_highlight_css", "path": "serve/gradio_css.py", "snippet": "" }, { "identifier": "post_process_code", "path": "serve/model_utils.py", "snippet": "def post_process_code(code):\n sep = \"\\n```\"\n if sep in code:\n blocks = code.split(sep)\n if len(blocks) % 2 == 1:\n for i in range(1, len(blocks), 2):\n blocks[i] = blocks[i].replace(\"\\\\_\", \"_\")\n code = sep.join(blocks)\n return code" }, { "identifier": "Honeybee_Server", "path": "serve/model_worker.py", "snippet": "class Honeybee_Server:\n def __init__(\n self,\n base_model=\"checkpoints/7B-C-Abs-M144/last\",\n log_dir=\"./\",\n load_in_8bit=False,\n bf16=True,\n device=\"cuda\",\n io=None,\n ):\n self.log_dir = log_dir\n\n self.model, self.tokenizer, self.processor = get_model(\n base_model,\n use_bf16=bf16,\n load_in_8bit=load_in_8bit,\n )\n self.model.to(device)\n\n self.bf16 = bf16\n self.load_in_8bit = load_in_8bit\n\n if not load_in_8bit:\n if bf16:\n self.model.bfloat16()\n else:\n self.model.half()\n self.model.eval()\n\n self.io = io\n\n def evaluate(\n self,\n pixel_values=None,\n input_ids=None,\n temperature=1.0,\n top_p=0.9,\n top_k=5,\n num_beams=3,\n max_new_tokens=256,\n stream_output=True,\n length_penalty=1.0,\n no_repeat_ngram_size=2,\n do_sample=False,\n early_stopping=True,\n **kwargs\n ):\n generation_config = {\n \"temperature\": temperature,\n \"top_p\": top_p,\n \"top_k\": top_k,\n \"num_beams\": num_beams,\n \"no_repeat_ngram_size\": no_repeat_ngram_size,\n \"do_sample\": do_sample,\n \"early_stopping\": early_stopping,\n \"length_penalty\": length_penalty,\n }\n\n generate_params = {\n \"pixel_values\": pixel_values,\n \"input_ids\": input_ids,\n \"return_dict_in_generate\": True,\n \"output_scores\": True,\n \"max_new_tokens\": max_new_tokens,\n }\n generate_params.update(generation_config)\n\n if stream_output:\n # Stream the reply 1 token at a time.\n # This is based on the trick of using 'stopping_criteria' to create an iterator,\n # from https://github.com/oobabooga/text-generation-webui/blob/ad37f396fc8bcbab90e11ecf17c56c97bfbd4a9c/modules/text_generation.py#L216-L243.\n\n def generate_with_callback(callback=None, **kwargs):\n kwargs.setdefault(\"stopping_criteria\", transformers.StoppingCriteriaList())\n kwargs[\"stopping_criteria\"].append(Stream(callback_func=callback))\n with torch.no_grad():\n self.model.generate(**kwargs)\n\n def generate_with_streaming(**kwargs):\n return Iteratorize(generate_with_callback, kwargs, callback=None)\n\n with generate_with_streaming(**generate_params) as generator:\n for output in generator:\n # new_tokens = len(output) - len(input_ids[0])\n decoded_output = self.tokenizer.decode(output)\n\n if output[-1] in [self.tokenizer.eos_token_id]:\n break\n\n yield post_process_output(decoded_output)\n return # early return for stream_output\n\n with torch.no_grad():\n generation_output = self.model.generate(\n pixel_values=pixel_values,\n input_ids=input_ids,\n return_dict_in_generate=True,\n output_scores=True,\n 
max_new_tokens=max_new_tokens,\n **generation_config\n )\n s = generation_output.sequences[0].cpu()\n output = self.tokenizer.decode(s)\n yield post_process_output(output)\n\n def predict(self, data):\n prompt = [data[\"text_input\"]]\n images = data[\"images\"] if len(data[\"images\"]) > 0 else None\n if images:\n images = [Image.open(BytesIO(base64.b64decode(image))) for image in images]\n inputs = self.processor(text=prompt, images=images, return_tensors=\"pt\")\n\n input_ids = inputs[\"input_ids\"].to(self.model.device)\n if \"pixel_values\" in inputs:\n if self.load_in_8bit:\n pixel_values = inputs[\"pixel_values\"].half().to(self.model.device)\n elif self.bf16:\n pixel_values = inputs[\"pixel_values\"].bfloat16().to(self.model.device)\n else:\n pixel_values = inputs[\"pixel_values\"].half().to(self.model.device)\n else:\n pixel_values = None\n\n cache = None\n\n try:\n for x in self.evaluate(\n pixel_values, input_ids, stream_output=True, **data[\"generation_config\"]\n ):\n cache = x # noqa: F841\n yield (x, True)\n except ValueError as e:\n print(\"Caught ValueError:\", e)\n yield (server_error_msg, False)\n except torch.cuda.CudaError as e:\n print(\"Caught torch.cuda.CudaError:\", e)\n yield (server_error_msg, False)\n\n return" }, { "identifier": "add_text", "path": "serve/serve_utils.py", "snippet": "def add_text(state, text, image, video, request: gr.Request):\n if len(text) <= 0 and (image is None or video is None):\n state.skip_next = True\n return (state, state.to_gradio_chatbot(), \"\", None, None) + (no_change_btn,) * 5\n\n if image is not None:\n if \"<image>\" not in text:\n text = text + \"\\n<image>\"\n text = (text, image)\n\n if video is not None:\n num_frames = 4\n if \"<image>\" not in text:\n text = text + \"\\n<image>\" * num_frames\n text = (text, video)\n\n state.append_message(state.roles[0], text)\n state.append_message(state.roles[1], None)\n state.skip_next = False\n return (state, state.to_gradio_chatbot(), \"\", None, None) + (disable_btn,) * 5" }, { "identifier": "regenerate", "path": "serve/serve_utils.py", "snippet": "def regenerate(state, request: gr.Request):\n state.messages[-1][-1] = None\n state.skip_next = False\n return (state, state.to_gradio_chatbot(), \"\", None, None) + (disable_btn,) * 5" }, { "identifier": "after_process_image", "path": "serve/serve_utils.py", "snippet": "class _IOWrapper:\n def __init__(self):\n def set_io(self, new_io):\n def __getattr__(self, name):\n def __str__(self):\ndef init():\ndef vote_last_response(state, vote_type, model_selector, request: gr.Request):\ndef upvote_last_response(state, model_selector, request: gr.Request):\ndef downvote_last_response(state, model_selector, request: gr.Request):\ndef flag_last_response(state, model_selector, request: gr.Request):\ndef regenerate(state, request: gr.Request):\ndef clear_history(request: gr.Request):\ndef add_text(state, text, image, video, request: gr.Request):\ndef after_process_image(prompt):" } ]
import argparse import json import os import time import gradio as gr import requests import torch from .conversation import default_conversation from .gradio_css import code_highlight_css from .model_utils import post_process_code from .model_worker import Honeybee_Server from .serve_utils import add_text # noqa: F401 from .serve_utils import regenerate # noqa: F401 from .serve_utils import ( after_process_image, clear_history, disable_btn, downvote_last_response, enable_btn, flag_last_response, get_window_url_params, init, no_change_btn, upvote_last_response, )
4,786
with gr.Row(): with gr.Column(scale=3): imagebox = gr.Image(type="pil") # dataset for selecting OwlEval data owleval = load_jsonl("data/OwlEval/questions.jsonl") owleval_data = gr.Dataset( components=[imagebox, textbox], label="OwlEval Examples", samples=[ [os.path.join("data/OwlEval", "images", it["image"]), it["question"]] for it in owleval ], ) with gr.Accordion("Parameters", open=False, visible=False) as parameter_row: max_output_tokens = gr.Slider( 0, 1024, 512, step=64, interactive=True, label="Max output tokens" ) temperature = gr.Slider( 0, 1, 1, step=0.1, interactive=True, label="Temperature" ) top_k = gr.Slider(1, 5, 3, step=1, interactive=True, label="Top K") top_p = gr.Slider(0, 1, 0.9, step=0.1, interactive=True, label="Top p") length_penalty = gr.Slider( 1, 5, 1, step=0.1, interactive=True, label="length_penalty" ) num_beams = gr.Slider(1, 5, 1, step=1, interactive=True, label="Beam Size") no_repeat_ngram_size = gr.Slider( 1, 5, 2, step=1, interactive=True, label="no_repeat_ngram_size" ) do_sample = gr.Checkbox(interactive=True, value=True, label="do_sample") videobox = gr.Video(visible=False) # [M-LLM] currently, we do not support video with gr.Column(scale=6): chatbot = gr.Chatbot(elem_id="chatbot", visible=False, height=1000) with gr.Row(): with gr.Column(scale=8): textbox.render() with gr.Column(scale=1, min_width=60): submit_btn = gr.Button(value="Submit", visible=False) with gr.Row(visible=False) as button_row: upvote_btn = gr.Button(value="👍 Upvote", interactive=False) downvote_btn = gr.Button(value="👎 Downvote", interactive=False) flag_btn = gr.Button(value="⚠️ Flag", interactive=False) regenerate_btn = gr.Button(value="🔄 Regenerate", interactive=False) clear_btn = gr.Button(value="🗑️ Clear history", interactive=False) gr.Markdown(learn_more_markdown) url_params = gr.JSON(visible=False) owleval_data.click(fn=set_dataset, inputs=owleval_data, outputs=owleval_data.components) btn_list = [upvote_btn, downvote_btn, flag_btn, regenerate_btn, clear_btn] parameter_list = [ max_output_tokens, temperature, top_k, top_p, num_beams, no_repeat_ngram_size, length_penalty, do_sample, ] upvote_btn.click( upvote_last_response, [state], [textbox, upvote_btn, downvote_btn, flag_btn] ) downvote_btn.click( downvote_last_response, [state], [textbox, upvote_btn, downvote_btn, flag_btn] ) flag_btn.click(flag_last_response, [state], [textbox, upvote_btn, downvote_btn, flag_btn]) regenerate_btn.click( regenerate_http_bot, [state] + parameter_list, [state, chatbot, textbox, imagebox, videobox] + btn_list, ) clear_btn.click( clear_history, None, [state, chatbot, textbox, imagebox, videobox] + btn_list ) textbox.submit( add_text_http_bot, [state, textbox, imagebox, videobox] + parameter_list, [state, chatbot, textbox, imagebox, videobox] + btn_list, ) submit_btn.click( add_text_http_bot, [state, textbox, imagebox, videobox] + parameter_list, [state, chatbot, textbox, imagebox, videobox] + btn_list, ) demo.load( load_demo, [url_params], [state, chatbot, textbox, submit_btn, button_row, parameter_row], _js=get_window_url_params, ) return demo if __name__ == "__main__": io = init() cur_dir = os.path.dirname(os.path.abspath(__file__)) log_dir = cur_dir[:-9] + "log" parser = argparse.ArgumentParser() parser.add_argument("--host", type=str, default="0.0.0.0") parser.add_argument("--debug", action="store_true", help="using debug mode") parser.add_argument("--port", type=int) parser.add_argument("--concurrency-count", type=int, default=100) parser.add_argument("--base-model", type=str, 
default="checkpoints/7B-C-Abs-M144/last") parser.add_argument("--load-8bit", action="store_true", help="using 8bit mode") parser.add_argument("--bf16", action="store_true", help="using 8bit mode") args = parser.parse_args() print(" >>> Init server")
# Reference: https://huggingface.co/spaces/MAGAer13/mPLUG-Owl/tree/main def load_jsonl(filename): with open(filename, "r", encoding="utf-8") as f: return [json.loads(line.strip("\n")) for line in f.readlines()] def set_dataset(example: list): return gr.Image.update(value=example[0]), gr.Textbox.update(value=example[1]) def set_example_text_input(example_text: str) -> dict: # for the example query texts return gr.Textbox.update(value=example_text[0]) def load_demo(url_params, request: gr.Request): dropdown_update = gr.Dropdown.update(visible=True) state = default_conversation.copy() return ( state, dropdown_update, gr.Chatbot.update(visible=True), gr.Textbox.update(visible=True), gr.Button.update(visible=True), gr.Row.update(visible=True), gr.Accordion.update(visible=True), ) def add_text_http_bot( state, text, image, video, max_output_tokens, temperature, top_k, top_p, num_beams, no_repeat_ngram_size, length_penalty, do_sample, request: gr.Request, ): if len(text) <= 0 and (image is None or video is None): state.skip_next = True return (state, state.to_gradio_chatbot(), "", None, None) + (no_change_btn,) * 5 if image is not None: if "<image>" not in text: text = text + "\n<image>" text = (text, image) if video is not None: num_frames = 4 if "<image>" not in text: text = text + "\n<image>" * num_frames text = (text, video) state.append_message(state.roles[0], text) state.append_message(state.roles[1], None) state.skip_next = False yield (state, state.to_gradio_chatbot(), "", None, None) + (disable_btn,) * 5 if state.skip_next: # This generate call is skipped due to invalid inputs yield (state, state.to_gradio_chatbot(), "", None, None) + (no_change_btn,) * 5 return prompt = after_process_image(state.get_prompt()) images = state.get_images() data = { "text_input": prompt, "images": images if len(images) > 0 else [], "generation_config": { "top_k": int(top_k), "top_p": float(top_p), "num_beams": int(num_beams), "no_repeat_ngram_size": int(no_repeat_ngram_size), "length_penalty": float(length_penalty), "do_sample": bool(do_sample), "temperature": float(temperature), "max_new_tokens": min(int(max_output_tokens), 1536), }, } state.messages[-1][-1] = "▌" yield (state, state.to_gradio_chatbot(), "", None, None) + (disable_btn,) * 5 try: for chunk in model.predict(data): if chunk: if chunk[1]: output = chunk[0].strip() output = post_process_code(output) state.messages[-1][-1] = output + "▌" yield (state, state.to_gradio_chatbot(), "", None, None) + (disable_btn,) * 5 else: output = chunk[0].strip() state.messages[-1][-1] = output yield (state, state.to_gradio_chatbot(), "", None, None) + ( disable_btn, disable_btn, disable_btn, enable_btn, enable_btn, ) return time.sleep(0.03) except requests.exceptions.RequestException as e: # noqa: F841 state.messages[-1][ -1 ] = "**NETWORK ERROR DUE TO HIGH TRAFFIC. 
PLEASE REGENERATE OR REFRESH THIS PAGE.**" yield (state, state.to_gradio_chatbot(), "", None, None) + ( disable_btn, disable_btn, disable_btn, enable_btn, enable_btn, ) return state.messages[-1][-1] = state.messages[-1][-1][:-1] yield (state, state.to_gradio_chatbot(), "", None, None) + (enable_btn,) * 5 def regenerate_http_bot( state, max_output_tokens, temperature, top_k, top_p, num_beams, no_repeat_ngram_size, length_penalty, do_sample, request: gr.Request, ): state.messages[-1][-1] = None state.skip_next = False yield (state, state.to_gradio_chatbot(), "", None, None) + (disable_btn,) * 5 prompt = after_process_image(state.get_prompt()) images = state.get_images() data = { "text_input": prompt, "images": images if len(images) > 0 else [], "generation_config": { "top_k": int(top_k), "top_p": float(top_p), "num_beams": int(num_beams), "no_repeat_ngram_size": int(no_repeat_ngram_size), "length_penalty": float(length_penalty), "do_sample": bool(do_sample), "temperature": float(temperature), "max_new_tokens": min(int(max_output_tokens), 1536), }, } state.messages[-1][-1] = "▌" yield (state, state.to_gradio_chatbot(), "", None, None) + (disable_btn,) * 5 try: for chunk in model.predict(data): if chunk: if chunk[1]: output = chunk[0].strip() output = post_process_code(output) state.messages[-1][-1] = output + "▌" yield (state, state.to_gradio_chatbot(), "", None, None) + (disable_btn,) * 5 else: output = chunk[0].strip() state.messages[-1][-1] = output yield (state, state.to_gradio_chatbot(), "", None, None) + ( disable_btn, disable_btn, disable_btn, enable_btn, enable_btn, ) return time.sleep(0.03) except requests.exceptions.RequestException as e: # noqa: F841 state.messages[-1][ -1 ] = "**NETWORK ERROR DUE TO HIGH TRAFFIC. PLEASE REGENERATE OR REFRESH THIS PAGE.**" yield (state, state.to_gradio_chatbot(), "", None, None) + ( disable_btn, disable_btn, disable_btn, enable_btn, enable_btn, ) return state.messages[-1][-1] = state.messages[-1][-1][:-1] yield (state, state.to_gradio_chatbot(), "", None, None) + (enable_btn,) * 5 title_markdown = """ **Notice**: The output is generated by top-k sampling scheme and may involve some randomness. """ tos_markdown = """ ### Terms of use By using this service, users are required to agree to the following terms: The service is a research preview intended for non-commercial use only. It only provides limited safety measures and may generate offensive content. It must not be used for any illegal, harmful, violent, racist, or sexual purposes. The service may collect user dialogue data for future research. Please click the "Flag" button if you get any inappropriate answer! We will collect those to keep improving our moderator. For an optimal experience, please use desktop computers for this demo, as mobile devices may compromise its quality. **Copyright 2023 Alibaba DAMO Academy.** """ learn_more_markdown = """ ### License The service is a research preview intended for non-commercial use only, subject to the model [License](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) of LLaMA, [Terms of Use](https://openai.com/policies/terms-of-use) of the data generated by OpenAI, and [Privacy Practices](https://chrome.google.com/webstore/detail/sharegpt-share-your-chatg/daiacboceoaocpibfodeljbdfacokfjb) of ShareGPT. Please contact us if you find any potential violation. 
""" css = ( code_highlight_css + """ pre { white-space: pre-wrap; /* Since CSS 2.1 */ white-space: -moz-pre-wrap; /* Mozilla, since 1999 */ white-space: -pre-wrap; /* Opera 4-6 */ white-space: -o-pre-wrap; /* Opera 7 */ word-wrap: break-word; /* Internet Explorer 5.5+ */ } """ ) def build_demo(model_name: str = "M-LLM"): title_model_name = f"""<h1 align="center">{model_name} </h1>""" # with gr.Blocks(title="mPLUG-Owl🦉", theme=gr.themes.Base(), css=css) as demo: textbox = gr.Textbox( show_label=False, placeholder="Enter text and press ENTER", visible=False, container=False ) with gr.Blocks(title="M-LLM", css=css) as demo: state = gr.State() gr.Markdown(title_model_name) gr.Markdown(title_markdown) with gr.Row(): with gr.Column(scale=3): imagebox = gr.Image(type="pil") # dataset for selecting OwlEval data owleval = load_jsonl("data/OwlEval/questions.jsonl") owleval_data = gr.Dataset( components=[imagebox, textbox], label="OwlEval Examples", samples=[ [os.path.join("data/OwlEval", "images", it["image"]), it["question"]] for it in owleval ], ) with gr.Accordion("Parameters", open=False, visible=False) as parameter_row: max_output_tokens = gr.Slider( 0, 1024, 512, step=64, interactive=True, label="Max output tokens" ) temperature = gr.Slider( 0, 1, 1, step=0.1, interactive=True, label="Temperature" ) top_k = gr.Slider(1, 5, 3, step=1, interactive=True, label="Top K") top_p = gr.Slider(0, 1, 0.9, step=0.1, interactive=True, label="Top p") length_penalty = gr.Slider( 1, 5, 1, step=0.1, interactive=True, label="length_penalty" ) num_beams = gr.Slider(1, 5, 1, step=1, interactive=True, label="Beam Size") no_repeat_ngram_size = gr.Slider( 1, 5, 2, step=1, interactive=True, label="no_repeat_ngram_size" ) do_sample = gr.Checkbox(interactive=True, value=True, label="do_sample") videobox = gr.Video(visible=False) # [M-LLM] currently, we do not support video with gr.Column(scale=6): chatbot = gr.Chatbot(elem_id="chatbot", visible=False, height=1000) with gr.Row(): with gr.Column(scale=8): textbox.render() with gr.Column(scale=1, min_width=60): submit_btn = gr.Button(value="Submit", visible=False) with gr.Row(visible=False) as button_row: upvote_btn = gr.Button(value="👍 Upvote", interactive=False) downvote_btn = gr.Button(value="👎 Downvote", interactive=False) flag_btn = gr.Button(value="⚠️ Flag", interactive=False) regenerate_btn = gr.Button(value="🔄 Regenerate", interactive=False) clear_btn = gr.Button(value="🗑️ Clear history", interactive=False) gr.Markdown(learn_more_markdown) url_params = gr.JSON(visible=False) owleval_data.click(fn=set_dataset, inputs=owleval_data, outputs=owleval_data.components) btn_list = [upvote_btn, downvote_btn, flag_btn, regenerate_btn, clear_btn] parameter_list = [ max_output_tokens, temperature, top_k, top_p, num_beams, no_repeat_ngram_size, length_penalty, do_sample, ] upvote_btn.click( upvote_last_response, [state], [textbox, upvote_btn, downvote_btn, flag_btn] ) downvote_btn.click( downvote_last_response, [state], [textbox, upvote_btn, downvote_btn, flag_btn] ) flag_btn.click(flag_last_response, [state], [textbox, upvote_btn, downvote_btn, flag_btn]) regenerate_btn.click( regenerate_http_bot, [state] + parameter_list, [state, chatbot, textbox, imagebox, videobox] + btn_list, ) clear_btn.click( clear_history, None, [state, chatbot, textbox, imagebox, videobox] + btn_list ) textbox.submit( add_text_http_bot, [state, textbox, imagebox, videobox] + parameter_list, [state, chatbot, textbox, imagebox, videobox] + btn_list, ) submit_btn.click( add_text_http_bot, [state, 
textbox, imagebox, videobox] + parameter_list, [state, chatbot, textbox, imagebox, videobox] + btn_list, ) demo.load( load_demo, [url_params], [state, chatbot, textbox, submit_btn, button_row, parameter_row], _js=get_window_url_params, ) return demo if __name__ == "__main__": io = init() cur_dir = os.path.dirname(os.path.abspath(__file__)) log_dir = cur_dir[:-9] + "log" parser = argparse.ArgumentParser() parser.add_argument("--host", type=str, default="0.0.0.0") parser.add_argument("--debug", action="store_true", help="using debug mode") parser.add_argument("--port", type=int) parser.add_argument("--concurrency-count", type=int, default=100) parser.add_argument("--base-model", type=str, default="checkpoints/7B-C-Abs-M144/last") parser.add_argument("--load-8bit", action="store_true", help="using 8bit mode") parser.add_argument("--bf16", action="store_true", help="using 8bit mode") args = parser.parse_args() print(" >>> Init server")
model = Honeybee_Server(
3
2023-12-06 14:48:41+00:00
8k
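The honeybee record above repeatedly uses one streaming pattern in `add_text_http_bot` and `regenerate_http_bot`: yield the partial reply with a trailing "▌" cursor while tokens arrive, then yield the final text with the cursor stripped. A minimal, framework-free sketch of that pattern (illustrative only; the chunk source name is hypothetical):

```python
from typing import Iterator

CURSOR = "▌"

def stream_reply(chunks: Iterator[str]):
    """Yield growing partial text with a cursor, then the final text without it."""
    text = ""
    for piece in chunks:
        text += piece
        yield text + CURSOR  # partial update, as shown in the chat UI
    yield text               # final update, cursor removed

for update in stream_reply(iter(["Hello", ", ", "world!"])):
    print(update)
```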
taikinman/langrila
src/langrila/chat_module/function_calling.py
[ { "identifier": "BaseConversationLengthAdjuster", "path": "src/langrila/base.py", "snippet": "class BaseConversationLengthAdjuster(ABC):\n @abstractmethod\n def run(self, messages: list[dict[str, str]]) -> list[dict[str, str]]:\n raise NotImplementedError\n\n def __call__(self, messages: list[dict[str, str]]) -> list[dict[str, str]]:\n return self.run(messages)" }, { "identifier": "BaseFilter", "path": "src/langrila/base.py", "snippet": "class BaseFilter(ABC):\n @abstractmethod\n def apply(self, messages: list[dict[str, str]]) -> list[dict[str, str]]:\n raise NotImplementedError\n\n @abstractmethod\n def restore(self, messages: list[dict[str, str]]) -> list[dict[str, str]]:\n raise NotImplementedError" }, { "identifier": "BaseModule", "path": "src/langrila/base.py", "snippet": "class BaseModule(ABC):\n @abstractmethod\n def run(self, *args, **kwargs):\n raise NotImplementedError\n\n async def arun(self, *args, **kwargs):\n raise NotImplementedError\n\n def stream(self, *args, **kwargs):\n raise NotImplementedError\n\n async def astream(self, *args, **kwargs):\n raise NotImplementedError\n\n def __call__(self, *args, **kwargs):\n _async = kwargs.pop(\"arun\", False)\n _stream = kwargs.pop(\"stream\", False)\n if _async:\n if _stream:\n return self.astream(*args, **kwargs)\n else:\n return asyncio.create_task(self.arun(*args, **kwargs))\n else:\n if _stream:\n return self.stream(*args, **kwargs)\n else:\n return self.run(*args, **kwargs)" }, { "identifier": "OldConversationTruncationModule", "path": "src/langrila/conversation_adjuster/truncate.py", "snippet": "class OldConversationTruncationModule(BaseConversationLengthAdjuster):\n \"\"\"\n Adjust the number of tokens to be less than or equal to context_length, starting from the oldest message forward\n \"\"\"\n\n def __init__(self, model_name: str, context_length: int):\n if model_name in MODEL_POINT.keys():\n print(f\"{model_name} is automatically converted to {MODEL_POINT[model_name]}\")\n model_name = MODEL_POINT[model_name]\n\n assert (\n model_name in MODEL_CONFIG.keys()\n ), f\"model_name must be one of {', '.join(sorted(MODEL_CONFIG.keys()))}.\"\n\n self.model_name = model_name\n self.context_length = context_length\n\n def run(self, messages: list[dict[str, str]]) -> list[dict[str, str]]:\n adjusted_messages: list[dict[str, str]] = []\n total_n_tokens: int = 0\n for message in messages[::-1]:\n if total_n_tokens <= self.context_length:\n message, total_n_tokens = self.adjust_message_length_and_update_total_tokens(\n message, total_n_tokens\n )\n\n if message is not None:\n adjusted_messages.append(message)\n return adjusted_messages[::-1]\n\n def adjust_message_length_and_update_total_tokens(\n self, message: dict[str, Any], total_n_tokens: int = 0\n ) -> str:\n n_tokens = get_n_tokens(message, self.model_name)\n if total_n_tokens + n_tokens[\"total\"] <= self.context_length:\n total_n_tokens += n_tokens[\"total\"]\n return message, total_n_tokens\n else:\n available_n_tokens = max(\n self.context_length - total_n_tokens - n_tokens[\"other\"], 0\n ) # available_n_tokens for content\n if available_n_tokens > 0:\n if isinstance(message[\"content\"], str):\n message[\"content\"] = self.truncate(message[\"content\"], available_n_tokens)\n total_n_tokens += available_n_tokens + n_tokens[\"other\"]\n print(\n \"Input message is truncated because total length of messages exceeds context length.\"\n )\n return message, total_n_tokens\n elif \"vision\" in self.model_name and isinstance(message[\"content\"], list):\n return None, 
total_n_tokens # truncate whole image\n else:\n raise ValueError(\n f\"message['content'] must be str or list, but {type(message['content'])} is given.\"\n )\n else:\n return None, total_n_tokens\n\n def truncate(self, text: str, n_tokens: int) -> str:\n try:\n TOKENIZER = tiktoken.encoding_for_model(self.model_name)\n except KeyError:\n print(\"Warning: model not found. Using cl100k_base encoding.\")\n TOKENIZER = tiktoken.get_encoding(\"cl100k_base\")\n\n if n_tokens > 0:\n return TOKENIZER.decode(TOKENIZER.encode(text)[-n_tokens:])\n else:\n return \"\"" }, { "identifier": "Message", "path": "src/langrila/message.py", "snippet": "class Message(BaseModel):\n content: str\n images: Any | list[Any] | None = None\n image_resolution: str | None = None\n\n @property\n def as_system(self):\n return {\"role\": \"system\", \"content\": self.content}\n\n @property\n def as_user(self):\n if self.images:\n content = [{\"type\": \"text\", \"text\": self.content}]\n if not isinstance(self.images, list):\n images = [self.images]\n else:\n images = self.images\n\n for image in images:\n content.append(\n {\n \"type\": \"image_url\",\n \"image_url\": {\n \"url\": f\"data:image/jpeg;base64,{encode_image(image)}\",\n \"detail\": self.image_resolution if self.image_resolution else \"low\",\n },\n }\n )\n return {\"role\": \"user\", \"content\": content}\n else:\n return {\"role\": \"user\", \"content\": self.content}\n\n @property\n def as_assistant(self):\n return {\"role\": \"assistant\", \"content\": self.content}\n\n @property\n def as_tool(self):\n return {\"role\": \"tool\", \"content\": self.content}\n\n @property\n def as_function(self):\n return {\"role\": \"function\", \"content\": self.content}\n\n @field_validator(\"image_resolution\")\n def check_image_resolution_value(cls, val):\n if val not in [\"low\", \"high\"]:\n raise ValueError(\n \"image_resolution must be either 'low' or 'high' due to token management.\"\n )\n return val" }, { "identifier": "_NEWER_MODEL_CONFIG", "path": "src/langrila/model_config.py", "snippet": "_NEWER_MODEL_CONFIG = {\n \"gpt-4-1106-preview\": {\n \"max_tokens\": 128000,\n \"prompt_cost_per_token\": 0.00001,\n \"completion_cost_per_token\": 0.00003,\n },\n \"gpt-4-vision-preview\": {\n \"max_tokens\": 128000,\n \"prompt_cost_per_token\": 0.00001,\n \"completion_cost_per_token\": 0.00003,\n },\n \"gpt-3.5-turbo-1106\": {\n \"max_tokens\": 4096,\n \"prompt_cost_per_token\": 0.000001,\n \"completion_cost_per_token\": 0.000002,\n },\n}" }, { "identifier": "_OLDER_MODEL_CONFIG", "path": "src/langrila/model_config.py", "snippet": "_OLDER_MODEL_CONFIG = {\n \"gpt-4-0314\": {\n \"max_tokens\": 8192,\n \"prompt_cost_per_token\": 0.00003,\n \"completion_cost_per_token\": 0.00006,\n },\n \"gpt-4-0613\": {\n \"max_tokens\": 8192,\n \"prompt_cost_per_token\": 0.00003,\n \"completion_cost_per_token\": 0.00006,\n },\n \"gpt-4-32k-0314\": {\n \"max_tokens\": 32768,\n \"prompt_cost_per_token\": 0.00006,\n \"completion_cost_per_token\": 0.00012,\n },\n \"gpt-4-32k-0613\": {\n \"max_tokens\": 32768,\n \"prompt_cost_per_token\": 0.00006,\n \"completion_cost_per_token\": 0.00012,\n },\n \"gpt-3.5-turbo-0301\": {\n \"max_tokens\": 4096,\n \"prompt_cost_per_token\": 0.0000015,\n \"completion_cost_per_token\": 0.000002,\n },\n \"gpt-3.5-turbo-0613\": {\n \"max_tokens\": 4096,\n \"prompt_cost_per_token\": 0.0000015,\n \"completion_cost_per_token\": 0.000002,\n },\n \"gpt-3.5-turbo-16k-0613\": {\n \"max_tokens\": 16384,\n \"prompt_cost_per_token\": 0.000003,\n \"completion_cost_per_token\": 
0.000004,\n },\n \"gpt-3.5-turbo-instruct\": {\n \"max_tokens\": 8192,\n \"prompt_cost_per_token\": 0.0000015,\n \"completion_cost_per_token\": 0.000002,\n },\n}" }, { "identifier": "MODEL_CONFIG", "path": "src/langrila/model_config.py", "snippet": "MODEL_CONFIG = {}" }, { "identifier": "MODEL_POINT", "path": "src/langrila/model_config.py", "snippet": "MODEL_POINT = {\n \"gpt-4\": \"gpt-4-0613\",\n \"gpt-4-32k\": \"gpt-4-32k-0613\",\n \"gpt-4-128k\": \"gpt-4-1106-preview\",\n \"gpt-4-vision\": \"gpt-4-vision-preview\",\n \"gpt-3.5-turbo\": \"gpt-3.5-turbo-0613\",\n \"gpt-3.5-turbo-16k\": \"gpt-3.5-turbo-16k-0613\",\n}" }, { "identifier": "FunctionCallingResults", "path": "src/langrila/result.py", "snippet": "class FunctionCallingResults(BaseModel):\n usage: Usage\n results: list[ToolOutput]\n prompt: Optional[str | dict[str, str] | list[dict[str, str]]] = None" }, { "identifier": "ToolOutput", "path": "src/langrila/result.py", "snippet": "class ToolOutput(BaseModel):\n call_id: str | None\n funcname: str | None\n args: str | None\n output: Any" }, { "identifier": "Usage", "path": "src/langrila/usage.py", "snippet": "class Usage(BaseModel):\n prompt_tokens: int = 0\n completion_tokens: int = 0\n\n def __add__(self, other: __class__ | dict | CompletionUsage):\n if isinstance(other, dict):\n other = Usage(**other)\n\n if hasattr(other, 'prompt_tokens'):\n prompt_tokens = self.prompt_tokens + other.prompt_tokens\n else:\n prompt_tokens = self.prompt_tokens\n if hasattr(other, 'completion_tokens'):\n completion_tokens = self.completion_tokens + other.completion_tokens\n else:\n completion_tokens = self.completion_tokens\n return Usage(\n prompt_tokens=prompt_tokens,\n completion_tokens=completion_tokens,\n )\n\n def __sub__(self, other: __class__ | dict | CompletionUsage):\n if isinstance(other, dict):\n other = Usage(**other)\n\n if hasattr(other, 'prompt_tokens'):\n prompt_tokens = self.prompt_tokens - other.prompt_tokens\n else:\n prompt_tokens = self.prompt_tokens\n if hasattr(other, 'completion_tokens'):\n completion_tokens = self.completion_tokens - other.completion_tokens\n else:\n completion_tokens = self.completion_tokens\n return Usage(\n prompt_tokens=prompt_tokens,\n completion_tokens=completion_tokens,\n )\n\n @property\n def total_tokens(self):\n return self.prompt_tokens + self.completion_tokens\n\n @field_validator('prompt_tokens')\n def check_prompt_tokens(cls, v):\n if v < 0:\n raise ValueError('prompt_tokens must be greater or equal to 0')\n return v\n\n @field_validator('completion_tokens')\n def check_completion_tokens(cls, v):\n if v < 0:\n raise ValueError('completion_tokens must be greater or equal to 0')\n return v\n\n def __repr__(self):\n return f'Usage(prompt_tokens={self.prompt_tokens}, completion_tokens={self.completion_tokens}, total_tokens={self.total_tokens})'" }, { "identifier": "get_async_client", "path": "src/langrila/utils.py", "snippet": "def get_async_client(\n api_key_env_name: str,\n api_version: Optional[str] = None,\n endpoint_env_name: Optional[str] = None,\n organization_id_env_name: Optional[str] = None,\n deployment_id_env_name: Optional[str] = None,\n api_type: Optional[str] = \"openai\",\n timeout: int = 60,\n max_retries: int = 5,\n):\n if api_type == \"azure\":\n return AsyncAzureOpenAI(\n **get_openai_client_settings(\n api_key_env_name=api_key_env_name,\n organization_id_env_name=organization_id_env_name,\n api_version=api_version,\n endpoint_env_name=endpoint_env_name,\n deployment_id_env_name=deployment_id_env_name,\n 
max_retries=max_retries,\n timeout=timeout,\n )\n )\n elif api_type == \"openai\":\n return AsyncOpenAI(\n **get_openai_client_settings(\n api_key_env_name=api_key_env_name,\n organization_id_env_name=organization_id_env_name,\n max_retries=max_retries,\n timeout=timeout,\n )\n )\n else:\n raise ValueError(f\"api_type must be 'azure' or 'openai'. Got {api_type}.\")" }, { "identifier": "get_client", "path": "src/langrila/utils.py", "snippet": "def get_client(\n api_key_env_name: str,\n api_version: Optional[str] = None,\n endpoint_env_name: Optional[str] = None,\n organization_id_env_name: Optional[str] = None,\n deployment_id_env_name: Optional[str] = None,\n api_type: Optional[str] = \"openai\",\n timeout: int = 60,\n max_retries: int = 5,\n):\n if api_type == \"azure\":\n assert (\n api_version and endpoint_env_name and deployment_id_env_name\n ), \"api_version, endpoint_env_name, and deployment_id_env_name must be specified when api_type is 'azure'.\"\n return AzureOpenAI(\n **get_openai_client_settings(\n api_key_env_name=api_key_env_name,\n organization_id_env_name=organization_id_env_name,\n api_version=api_version,\n endpoint_env_name=endpoint_env_name,\n deployment_id_env_name=deployment_id_env_name,\n max_retries=max_retries,\n timeout=timeout,\n )\n )\n elif api_type == \"openai\":\n return OpenAI(\n **get_openai_client_settings(\n api_key_env_name=api_key_env_name,\n organization_id_env_name=organization_id_env_name,\n max_retries=max_retries,\n timeout=timeout,\n )\n )\n else:\n raise ValueError(f\"api_type must be 'azure' or 'openai'. Got {api_type}.\")" }, { "identifier": "get_token_limit", "path": "src/langrila/utils.py", "snippet": "def get_token_limit(model_name: str):\n if model_name in MODEL_ZOO:\n return MODEL_CONFIG[model_name][\"max_tokens\"]\n else:\n raise NotImplementedError(\n f\"get_token_limit() is not implemented for model {model_name}. Please choose from following model : {', '.join(sorted(list(MODEL_ZOO)))}.\"\n )" }, { "identifier": "make_batch", "path": "src/langrila/utils.py", "snippet": "def make_batch(iterable, batch_size=1):\n length = len(iterable)\n for ndx in range(0, length, batch_size):\n yield iterable[ndx : min(ndx + batch_size, length)]" } ]
import asyncio import json from typing import Callable, Optional from pydantic import BaseModel, field_validator from ..base import BaseConversationLengthAdjuster, BaseFilter, BaseModule from ..conversation_adjuster.truncate import OldConversationTruncationModule from ..message import Message from ..model_config import _NEWER_MODEL_CONFIG, _OLDER_MODEL_CONFIG, MODEL_CONFIG, MODEL_POINT from ..result import FunctionCallingResults, ToolOutput from ..usage import Usage from ..utils import get_async_client, get_client, get_token_limit, make_batch
5,378
max_retries: int = 2, max_tokens: int = 2048, seed: Optional[int] = None, ) -> None: assert api_type in ["openai", "azure"], "api_type must be 'openai' or 'azure'." if api_type == "azure": assert ( api_version and endpoint_env_name and deployment_id_env_name ), "api_version, endpoint_env_name, and deployment_id_env_name must be specified for Azure API." self.api_key_env_name = api_key_env_name self.organization_id_env_name = organization_id_env_name self.api_type = api_type self.api_version = api_version self.endpoint_env_name = endpoint_env_name self.deployment_id_env_name = deployment_id_env_name self.model_name = model_name self.timeout = timeout self.max_retries = max_retries self.tools = {f.__name__: f for f in tools} _tool_names_from_config = {f.name for f in tool_configs} assert ( len(_tool_names_from_config ^ set(self.tools.keys())) == 0 ), f"tool names in tool_configs must be the same as the function names in tools. tool names in tool_configs: {_tool_names_from_config}, function names in tools: {set(self.tools.keys())}" self.tool_choice = tool_choice self.max_tokens = max_tokens self.additional_inputs = {} if model_name in _NEWER_MODEL_CONFIG.keys(): self.seed = seed self.additional_inputs["seed"] = seed self.tool_configs = [f.model_dump() for f in tool_configs] self.additional_inputs["tools"] = self.tool_configs self.additional_inputs["tool_choice"] = self.tool_choice else: if seed: print( f"seed is ignored because it's not supported for {model_name} (api_type:{api_type})" ) self.tool_configs = [f.model_dump()["function"] for f in tool_configs] self.additional_inputs["functions"] = self.tool_configs self.additional_inputs["function_call"] = self.tool_choice def run(self, messages: list[dict[str, str]]) -> FunctionCallingResults: if len(messages) == 0: raise ValueError("messages must not be empty.") client = get_client( api_key_env_name=self.api_key_env_name, organization_id_env_name=self.organization_id_env_name, api_version=self.api_version, endpoint_env_name=self.endpoint_env_name, deployment_id_env_name=self.deployment_id_env_name, api_type=self.api_type, max_retries=self.max_retries, timeout=self.timeout, ) response = client.chat.completions.create( model=self.model_name, messages=messages if isinstance(messages, list) else [messages], temperature=0, max_tokens=self.max_tokens, top_p=0, frequency_penalty=0, presence_penalty=0, stop=None, **self.additional_inputs, ) usage = Usage() usage += response.usage if self.model_name in _NEWER_MODEL_CONFIG.keys(): response_message = response.choices[0].message tool_calls = response_message.tool_calls results = [] if tool_calls is not None: for tool_call in tool_calls: call_id = tool_call.id funcname = tool_call.function.name args = tool_call.function.arguments func_out = self.tools[funcname](**json.loads(args)) output = ToolOutput( call_id=call_id, funcname=funcname, args=args, output=func_out, ) results.append(output) return FunctionCallingResults(usage=usage, results=results, prompt=messages) elif self.model_name in _OLDER_MODEL_CONFIG.keys(): response_message = response.choices[0].message function_call = response_message.function_call output = [] if function_call is not None: funcname = function_call.name args = function_call.arguments func_out = self.tools[funcname](**json.loads(args)) output += [ ToolOutput( call_id=None, funcname=funcname, args=args, output=func_out, ) ] return FunctionCallingResults(usage=usage, results=output, prompt=messages) async def arun(self, messages: list[dict[str, str]]) -> FunctionCallingResults: if 
len(messages) == 0: raise ValueError("messages must not be empty.")
class ToolProperty(BaseModel): name: str type: str description: str def model_dump(self): return {self.name: super().model_dump(exclude=["name"])} @field_validator("type") def check_type_value(cls, v): if v not in {"string", "number", "boolean"}: raise ValueError("type must be one of string or number.") return v class ToolParameter(BaseModel): type: str = "object" properties: list[ToolProperty] required: Optional[list[str]] = None def model_dump(self): dumped = super().model_dump(exclude=["properties", "required"]) _properties = {} for p in self.properties: _properties.update(p.model_dump()) dumped["properties"] = _properties if self.required is not None: dumped["required"] = self.required return dumped @field_validator("type") def check_type_value(cls, v): if v not in {"object"}: raise ValueError("supported type is only object") return v @field_validator("required") def check_required_value(cls, required, values): properties = values.data["properties"] property_names = {p.name for p in properties} if required is not None: for r in required: if r not in property_names: raise ValueError(f"required property '{r}' is not defined in properties.") return required class ToolConfig(BaseModel): name: str type: str = "function" description: str parameters: ToolParameter def model_dump(self): dumped = super().model_dump(exclude=["parameters", "type"]) dumped["parameters"] = self.parameters.model_dump() return {"type": self.type, self.type: dumped} @field_validator("type") def check_type_value(cls, v): if v not in {"function"}: raise ValueError("supported type is only function") return v class BaseFunctionCallingModule(BaseModule): def __init__( self, api_key_env_name: str, model_name: str, tools: list[Callable], tool_configs: list[ToolConfig], tool_choice: str = "auto", api_type: str = "openai", api_version: Optional[str] = None, endpoint_env_name: Optional[str] = None, deployment_id_env_name: Optional[str] = None, organization_id_env_name: Optional[str] = None, timeout: int = 30, max_retries: int = 2, max_tokens: int = 2048, seed: Optional[int] = None, ) -> None: assert api_type in ["openai", "azure"], "api_type must be 'openai' or 'azure'." if api_type == "azure": assert ( api_version and endpoint_env_name and deployment_id_env_name ), "api_version, endpoint_env_name, and deployment_id_env_name must be specified for Azure API." self.api_key_env_name = api_key_env_name self.organization_id_env_name = organization_id_env_name self.api_type = api_type self.api_version = api_version self.endpoint_env_name = endpoint_env_name self.deployment_id_env_name = deployment_id_env_name self.model_name = model_name self.timeout = timeout self.max_retries = max_retries self.tools = {f.__name__: f for f in tools} _tool_names_from_config = {f.name for f in tool_configs} assert ( len(_tool_names_from_config ^ set(self.tools.keys())) == 0 ), f"tool names in tool_configs must be the same as the function names in tools. 
tool names in tool_configs: {_tool_names_from_config}, function names in tools: {set(self.tools.keys())}" self.tool_choice = tool_choice self.max_tokens = max_tokens self.additional_inputs = {} if model_name in _NEWER_MODEL_CONFIG.keys(): self.seed = seed self.additional_inputs["seed"] = seed self.tool_configs = [f.model_dump() for f in tool_configs] self.additional_inputs["tools"] = self.tool_configs self.additional_inputs["tool_choice"] = self.tool_choice else: if seed: print( f"seed is ignored because it's not supported for {model_name} (api_type:{api_type})" ) self.tool_configs = [f.model_dump()["function"] for f in tool_configs] self.additional_inputs["functions"] = self.tool_configs self.additional_inputs["function_call"] = self.tool_choice def run(self, messages: list[dict[str, str]]) -> FunctionCallingResults: if len(messages) == 0: raise ValueError("messages must not be empty.") client = get_client( api_key_env_name=self.api_key_env_name, organization_id_env_name=self.organization_id_env_name, api_version=self.api_version, endpoint_env_name=self.endpoint_env_name, deployment_id_env_name=self.deployment_id_env_name, api_type=self.api_type, max_retries=self.max_retries, timeout=self.timeout, ) response = client.chat.completions.create( model=self.model_name, messages=messages if isinstance(messages, list) else [messages], temperature=0, max_tokens=self.max_tokens, top_p=0, frequency_penalty=0, presence_penalty=0, stop=None, **self.additional_inputs, ) usage = Usage() usage += response.usage if self.model_name in _NEWER_MODEL_CONFIG.keys(): response_message = response.choices[0].message tool_calls = response_message.tool_calls results = [] if tool_calls is not None: for tool_call in tool_calls: call_id = tool_call.id funcname = tool_call.function.name args = tool_call.function.arguments func_out = self.tools[funcname](**json.loads(args)) output = ToolOutput( call_id=call_id, funcname=funcname, args=args, output=func_out, ) results.append(output) return FunctionCallingResults(usage=usage, results=results, prompt=messages) elif self.model_name in _OLDER_MODEL_CONFIG.keys(): response_message = response.choices[0].message function_call = response_message.function_call output = [] if function_call is not None: funcname = function_call.name args = function_call.arguments func_out = self.tools[funcname](**json.loads(args)) output += [ ToolOutput( call_id=None, funcname=funcname, args=args, output=func_out, ) ] return FunctionCallingResults(usage=usage, results=output, prompt=messages) async def arun(self, messages: list[dict[str, str]]) -> FunctionCallingResults: if len(messages) == 0: raise ValueError("messages must not be empty.")
client = get_async_client(
12
2023-12-10 09:42:35+00:00
8k
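The langrila record above defines the `ToolProperty` / `ToolParameter` / `ToolConfig` pydantic models whose `model_dump()` methods build the OpenAI-style tools schema consumed by `BaseFunctionCallingModule`. A small usage sketch based on those definitions; the import path is an assumption inferred from the record's file_path, and `get_weather` is a hypothetical tool:

```python
# Assumed import path (derived from src/langrila/chat_module/function_calling.py).
from langrila.chat_module.function_calling import ToolConfig, ToolParameter, ToolProperty

def get_weather(location: str) -> str:  # hypothetical tool function
    return f"Sunny in {location}"

weather_tool = ToolConfig(
    name="get_weather",
    description="Look up the current weather for a location.",
    parameters=ToolParameter(
        properties=[
            ToolProperty(name="location", type="string", description="City name"),
        ],
        required=["location"],
    ),
)

# Per the record, model_dump() nests the schema as
# {"type": "function", "function": {"name": ..., "description": ..., "parameters": {...}}},
# which is the format passed via additional_inputs["tools"] for newer models.
print(weather_tool.model_dump())
```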
Open-All-Scale-Causal-Engine/OpenASCE
openasce/discovery/search_discovery/search_discovery.py
[ { "identifier": "CausalGraph", "path": "openasce/discovery/causal_graph.py", "snippet": "class CausalGraph(object):\n \"\"\"Causal Graph Class\n\n Represent the casual graph\n\n \"\"\"\n\n DEFAULT_COLUMN_NAME_PREFIX = \"x\"\n\n def __init__(self, names=[], bn=None, w: np.ndarray = None):\n \"\"\"Constructor\n\n Arguments:\n names: the node names\n bn: basic causal graph\n w: the connection matrix for causal graph\n\n \"\"\"\n self.para = None\n self.parents = {} # {c1:[p1, p2],c2:[p2,p3]....}\n self.names_to_index = {}\n self.index_to_names = {}\n self.n = 0\n self.index_exclude = []\n if bn is not None:\n self.copy(bn)\n else:\n if names:\n self.names_init(names)\n if w is not None:\n if self.names_to_index and self.index_to_names and self.parents:\n pass\n else:\n self.names_init(\n [\n self.DEFAULT_COLUMN_NAME_PREFIX + str(i)\n for i in range(w.shape[0])\n ]\n )\n nz = w.nonzero()\n for _ in map(lambda x: self.add_edge(x[0], x[1]), zip(nz[0], nz[1])):\n pass\n\n def names_init(self, names: List[str]) -> None:\n \"\"\"Initialize the graph with feature names\n\n initialize the names_to_index and index_to_names attributes\n initialize parents[i] = set() (no edges for the moment)\n\n Arguments:\n names (list of string): the names of the nodes\n\n Returns:\n None\n \"\"\"\n tmp_names = copy.deepcopy(names)\n self.names_to_index = {name: index for index, name in enumerate(names)}\n self.index_to_names = {index: name for index, name in enumerate(tmp_names)}\n self.n = len(self.names_to_index)\n for i in range(self.n):\n self.parents[i] = set()\n\n def parents_exclude(self, name_list: List[str]) -> None:\n for name in name_list:\n self.index_exclude.append(self.names_to_index[name])\n\n def random_init(self, max_parents: int = None) -> None:\n \"\"\"Add edges randomly\n\n For each node, pick a random number of the desired number of parents.\n Then, for each candidate, pick another random number. In average,\n the node will have the desired number of parents.\n\n Arguments:\n max_parents: maximal number of one node's parents\n \"\"\"\n max_parents = max_parents if max_parents else self.n - 1\n\n for i in range(self.n):\n nparents = np.random.randint(0, max_parents + 1)\n p = nparents / (self.n + 1.0)\n for j in range(self.n):\n if j != i and np.random.uniform() < p:\n self.add_edge(j, i)\n\n def merge(\n self, g1, g2, p1=1, p2=1, max_parents: int = None, mut_rate: float = 0.0\n ) -> None:\n \"\"\"Pick up edges from both g1 and g2 according to some random policy\n\n Arguments:\n g1 (CausalGraph)\n g1 (CausalGraph)\n p1 (float in [0,1]): proba of an edge in g1 being in self\n p2 (float in [0,1]): proba of an edge in g2 being in self\n p1 + p2 = 1\n max_parents (int)\n\n \"\"\"\n # merge randomly the two graphs\n self.random_merge(g1, g2, p1, p2)\n\n # introduce mutations\n self.mutate(mut_rate)\n\n # remove extra parents\n self.remove_extra_parents(max_parents)\n\n def random_merge(self, g1, g2, p1, p2) -> None:\n \"\"\"Creates graph from edges both in g1 and g2. 
Adds edges according to proba p1 and p2\n\n Arguments:\n g1 (CausalGraph)\n g1 (CausalGraph)\n p1 (float in [0,1]): proba of an edge in g1 being in self\n p2 (float in [0,1]): proba of an edge in g2 being in self\n \"\"\"\n for i, js in g1.parents.items():\n for j in js:\n if np.random.uniform() < p1 or p1 == 1:\n self.add_edge(j, i)\n for i, js in g2.parents.items():\n for j in js:\n if np.random.uniform() < p2 or p2 == 1:\n self.add_edge(j, i)\n\n def mutate(self, mut_rate: float = 0) -> None:\n \"\"\"Introduces new edges with a probability mut_rate\n\n Arguments:\n mut_rate (float in [0,1]): proba of mutation\n \"\"\"\n if mut_rate != 0:\n \"\"\"Do mutation like the following code snippet\n for i in range(self.n):\n for j in range(self.n):\n p = np.random.uniform()\n if p < mut_rate:\n if p < mut_rate / 2:\n self.add_edge(i, j)\n else:\n self.remove_edge(i, j)\n \"\"\"\n for _ in map(\n lambda x: self.add_edge(x[0], x[1])\n if x[2] < 0.25\n else self.remove_edge(x[0], x[1]),\n filter(\n lambda x: x[2] <= 0.5,\n map(\n lambda x: x + (np.random.uniform(),),\n itertools.product(self.n, self.n),\n ),\n ),\n ):\n pass\n\n def remove_extra_parents(self, max_parents: int = None) -> None:\n \"\"\"Removes extra edges if does not respect max parents constraint\n\n Arguments:\n max_parents: the maximal number of the node's parents\n \"\"\"\n if max_parents is not None:\n for i, js in self.parents.items():\n if len(js) > max_parents:\n indices = np.random.permutation(range(len(js)))\n for j in indices[0 : len(js) - max_parents]:\n self.remove_edge(j, i)\n\n def num_save(self, file_name: str) -> None:\n \"\"\"\n Saves the graph in number format\n\n Example\n parent1, child1\n parent2, child2\n\n Arguments:\n file_name: saved file path\n \"\"\"\n with open(file_name, \"w\") as f:\n for child_index, parents in self.parents.items():\n for parent_index in parents:\n f.write(f\"{parent_index},{child_index}\\n\")\n\n def save(self, file_path: str) -> None:\n \"\"\"Saves the graph in the desired format\n\n Example\n parent1, child1\n parent2, child2\n Arguments:\n file_path: saved file path\n \"\"\"\n with open(file_path, \"w\") as f:\n for child_index, parents in self.parents.items():\n for parent_index in parents:\n parent = self.index_to_names.get(parent_index)\n child = self.index_to_names.get(child_index)\n f.write(f\"{parent},{child}\\n\")\n\n def load(self, file_name: str) -> None:\n \"\"\"Loads structure from file. 
See save method\n\n Arguments:\n file_name: the path of the file to be loaded\n \"\"\"\n if not (self.names_to_index and self.index_to_names):\n name_set = set()\n # Go through the file to get all node names\n with open(file_name) as f:\n for line in f:\n line = line.strip().split(\",\")\n if len(line) == 2:\n p = line[0].replace(\"'\", \"\").replace('\"', \"\").strip()\n c = line[1].replace(\"'\", \"\").replace('\"', \"\").strip()\n if p not in name_set:\n name_set.add(p)\n if c not in name_set:\n name_set.add(c)\n self.names_to_index = {name: index for index, name in enumerate(name_set)}\n self.index_to_names = {index: name for index, name in enumerate(name_set)}\n with open(file_name) as f:\n for line in f:\n line = line.strip().split(\",\")\n if len(line) == 2:\n p = line[0].replace(\"'\", \"\").replace('\"', \"\").strip()\n c = line[1].replace(\"'\", \"\").replace('\"', \"\").strip()\n logger.info(f\"p={p}, c={c}\")\n p_index, c_index = self.names_to_index[p], self.names_to_index[c]\n self.add_edge(p_index, c_index)\n\n def is_cyclic(self) -> bool:\n \"\"\"Returns True if a cycle is found else False.\n\n Iterates over the nodes to find all the parents' parents, etc.\n A cycle is found if a node belongs to its own parent's set.\n\n \"\"\"\n all_parents = copy.deepcopy(self.parents)\n update = True\n while update:\n update = False\n for i in range(self.n):\n parents = list(all_parents[i])\n nparents = len(parents)\n for p in parents:\n all_parents[i].update(all_parents[p])\n if nparents != len(all_parents[i]):\n update = True\n if i in all_parents[i]:\n return True\n return False\n\n def copy(self, cg) -> None:\n \"\"\"Copies the structure of cg inside self and erases everything else\n\n Arguments:\n cg (CausalGraph): model\n \"\"\"\n self.index_to_names = copy.deepcopy(cg.index_to_names)\n self.names_to_index = copy.deepcopy(cg.names_to_index)\n self.n = cg.n\n self.parents = copy.deepcopy(cg.parents)\n\n def add_edge(\n self, parent: Union[int, str], child: Union[int, str], max_parents=None\n ) -> bool:\n \"\"\"Adds edge if respects max parents constraint and does not create a cycle\n\n Arguments:\n parent (int): id of parent\n child (int): id of child\n max_parents (int): None means no constraints\n\n Returns\n True if actually added the edge and False means no way to add the edge\n \"\"\"\n parent = self.names_to_index.get(parent) if isinstance(parent, str) else parent\n child = self.names_to_index.get(child) if isinstance(child, str) else child\n if (\n parent is None\n or child is None\n or parent >= self.n\n or child >= self.n\n or parent == child\n ):\n raise ValueError(f\"Error parent or child\")\n if max_parents is not None and len(self.parents[child]) >= max_parents:\n return False\n if child not in self.parents:\n self.parents[child] = set()\n self.parents[child].add(parent)\n if self.is_cyclic():\n logger.debug(\n f\"The edge from {parent} to {child} produces a cycle and be refused\"\n )\n self.remove_edge(parent, child)\n return False\n return True\n\n def remove_edge(self, parent: int, child: int, force: bool = True) -> None:\n try:\n self.parents[child].remove(parent)\n except Exception as e:\n if force:\n logger.debug(f\"Exception happens in remove edge: \\n{e}\")\n else:\n raise e\n\n def score(self, data: np.ndarray, rd: Dict[int, int] = None) -> float:\n \"\"\"Computes bayesian score of the structure given some data assuming uniform prior\n\n Example\n s = cg.score(data)\n\n Arguments:\n data: (nsamples, nfeatures)\n\n Returns\n s (float): bayesian score\n\n \"\"\"\n 
s = 0\n r = rd if rd else self.compute_r(data)\n for i in range(self.n):\n s += self.score_node(i, data, r)\n return s\n\n def compute_r(self, data: np.ndarray) -> dict:\n \"\"\"Compute the number of the value for each node\n\n Arguments:\n data (np array): (nsamples, nfeatures)\n Returns\n r (dict): r[i] = r_i\n \"\"\"\n r = {}\n for i in range(self.n):\n r[i] = np.unique(data[:, i]).shape[0]\n return r\n\n def score_node(self, i, data: np.ndarray, r) -> float:\n \"\"\"Compute the score of node i\n\n Arguments:\n i (int): node\n data (np array): (nsamples, nfeatures)\n r (dict of np array): r[i] = nb possible instances of i\n Returns\n s (float): contribution to log score of node i\n \"\"\"\n m, m0 = Counter(), Counter()\n columns = [i] + list(self.parents.get(i))\n extracted_data = data[:, columns]\n # counting nb of each instance of (node, parents) and (parents)\n for sample in extracted_data:\n m[tuple(sample)] += 1\n m0[tuple(sample[1:])] += 1\n # Adding contribution to the score (assuming uniform prior)\n s: float = 0.0\n \"\"\"Like following code snippet\n for c in m0.values():\n s -= gammaln(r[i] + c)\n s += gammaln(r[i])\n \"\"\"\n stat_i = r[i]\n s -= sum(gammaln(stat_i + c) for c in m0.values())\n s += gammaln(stat_i) * len(m0.values())\n \"\"\"Like following code snippet\n for c in m.values():\n s += gammaln(1 + c)\n \"\"\"\n s += sum(gammaln(1 + c) for c in m.values())\n return s\n\n def calculate_parameter(self, data: np.ndarray, rd: Dict[int, int] = None):\n \"\"\"Calculate the edge weight in the graph\n\n Arguments:\n data: samples\n rd: r[i] = r_i\n \"\"\"\n r = rd if rd else self.compute_r(data)\n node_param = {}\n aux_para_cp = {}\n for i in self.parents.keys():\n if i not in node_param:\n node_param[i] = {}\n if i not in aux_para_cp:\n aux_para_cp[i] = {}\n list_par = [i] + list(self.parents[i])\n data_par = data[:, list_par]\n all_count = 0\n column_list = [self.index_to_names[k] for k in list_par]\n for data_line in data_par:\n tup_k = tuple(data_line)\n if tup_k in aux_para_cp[i].keys():\n aux_para_cp[i][tup_k] += 1\n else:\n aux_para_cp[i][tup_k] = 1\n name = \"\"\n for k in range(len(list_par)):\n name += self.index_to_names[list_par[k]] + \" = {} \".format(\n data_line[k]\n )\n if name in node_param[i].keys():\n node_param[i][name] += 1\n else:\n node_param[i][name] = 1\n all_count += 1\n count = 1\n for k_s in r.keys():\n if k_s in list_par:\n count *= r[k_s]\n for tup_key in node_param[i].keys():\n node_param[i][tup_key] = (1 + node_param[i][tup_key]) / (\n count + all_count\n )\n df_res = []\n for tup_key in aux_para_cp[i].keys():\n aux_para_cp[i][tup_key] = (1 + aux_para_cp[i][tup_key]) / (\n count + all_count\n )\n list_tmp = list(tup_key)\n list_tmp.append(aux_para_cp[i][tup_key])\n df_res.append(list_tmp)\n column_list.append(GraphNodeForm.SCORE_COLUMN_NAME)\n p_ = GraphNodeForm(df_res, columns=column_list)\n node_param[i] = p_\n self.para = node_param\n return self.para" }, { "identifier": "Discovery", "path": "openasce/discovery/discovery.py", "snippet": "class Discovery(Runtime):\n \"\"\"Discovery Class\n\n Base class of the causal discovery\n\n Attributes:\n node_names (List[str]): the name of graph node, which should be set before fit\n\n \"\"\"\n\n def __init__(self) -> None:\n super().__init__()\n self._node_names = []\n\n def fit(self, *, X: Union[np.ndarray, Callable], **kwargs) -> None:\n \"\"\"Feed the sample data and search the causal relation on them\n\n Arguments:\n X: Features of the samples.\n\n Returns:\n None\n \"\"\"\n raise 
NotImplementedError(f\"Not implement for abstract class\")\n\n def get_result(self):\n \"\"\"Output the causal graph\n\n Returns:\n None\n \"\"\"\n raise NotImplementedError(f\"Not implement for abstract class\")\n\n @property\n def node_names(self):\n return self._node_names\n\n @node_names.setter\n def node_names(self, value: List[str]):\n self._node_names = value" }, { "identifier": "Strategy", "path": "openasce/discovery/search_discovery/search_strategy.py", "snippet": "class Strategy(object):\n \"\"\"General class to implement different structure learning methods\n\n Attributes\n edge_gain (float): the minimal gain of adding edge.\n target_name (str): the name of the node that will be label.\n \"\"\"\n\n def __init__(self, node_names: List[str], **kwargs):\n \"\"\"Contructor\n\n Arguments:\n node_names: the name of nodes\n \"\"\"\n self.node_names = node_names\n self.strategy_name = \"k2\"\n self.edge_gain = kwargs.get(\"edge_gain\", 20)\n self.target_name = kwargs.get(\"target_name\", None)\n self.target_index = (\n self.node_names.index(self.target_name) if self.target_name else None\n )\n\n def run(self, data: np.ndarray, **kwargs) -> Tuple:\n \"\"\"Run the actual strategy\n\n Arguments:\n data: the features of samples\n **kwargs (dict): dictionnary with method specific args\n\n Returns:\n\n \"\"\"\n g, s = self.k2(data=data, **kwargs)\n logger.info(f\"Best score is {s}\")\n return g, s\n\n def best_parent(self, *, g, s, node_i, data, max_parents, r, s_i):\n \"\"\"Search for best parent\n\n Returns g by adding to node i the best parent that maximizes the score\n\n Arguments:\n\n Returns:\n\n \"\"\"\n found_new = False\n g_max = g\n s_max = s\n shuffle_no = np.random.permutation(range(g.n))\n if self.target_name:\n # The target node can't be any node's parent if set target, so remove it from the node candidate\n shuffle_no = np.delete(\n shuffle_no, np.where(shuffle_no == self.target_index)\n )\n shuffle_no_new = shuffle_no\n edge_gain = self.edge_gain\n for j in shuffle_no_new:\n if j != node_i and j not in g.parents[node_i]:\n g_work = CausalGraph(bn=g)\n if g_work.add_edge(j, node_i, max_parents):\n # Try to add one edge between (j, node_i)\n new_score = g_work.score_node(node_i, data, r)\n logger.debug(f\"new_score={new_score}\")\n s_new = s - s_i + new_score\n if s_new > s_max + edge_gain:\n found_new = True\n g_max = g_work\n s_max = s_new\n return g_max, s_max, found_new\n\n def k2(self, data: np.ndarray, **kwargs):\n \"\"\"Implements k2 algorithm\n\n Agrument:\n data: the features of samples\n \"\"\"\n names = self.node_names\n global_max_parents = (\n kwargs.get(\"max_parents\")\n if kwargs.get(\"max_parents\")\n else len(list(names)) / 2\n )\n max_parents = global_max_parents\n logger.info(\n f\"current max parent number: {global_max_parents}, target_name={self.target_name}\"\n )\n ordering = np.random.permutation(range(len(names)))\n if self.target_index: # set target only so put target first one\n ordering = np.delete(ordering, np.where(ordering == self.target_index))\n ordering = np.insert(ordering, 0, self.target_index)\n max_parents = min(max_parents, len(list(names)) / 2)\n g = CausalGraph(names)\n global_s = g.score(data)\n logger.info(f\"initial graph score:{global_s}\")\n curr_data_r = g.compute_r(data)\n logger.info(f\"graph curr_data_r={curr_data_r}\")\n\n curr_pos = 0\n ordering_size = len(ordering)\n while curr_pos < ordering_size:\n node_i = ordering[curr_pos]\n s_i = g.score_node(node_i, data, curr_data_r)\n logger.info(f\"Begin to explore node {node_i}, 
s_i={s_i}\")\n curr_parent_count, found_new = 0, True\n while found_new and curr_parent_count < max_parents:\n g, global_s, found_new = self.best_parent(\n g=g,\n s=global_s,\n node_i=node_i,\n data=data,\n max_parents=global_max_parents,\n r=curr_data_r,\n s_i=s_i,\n )\n curr_parent_count += 1\n max_parents = global_max_parents\n curr_pos += 1\n return g, global_s" }, { "identifier": "logger", "path": "openasce/utils/logger.py", "snippet": "GLOBAL_LOGGER_NAME = \"openasce-log\"\nDEFAULT_FORMAT = (\n \"[%(asctime)s] [%(levelname)s] [%(filename)s:%(lineno)d:%(funcName)s] %(message)s\"\n)\nDEFAULT_FORMATTER = logging.Formatter(DEFAULT_FORMAT)\ndef init_custom_logger(name):\nclass openasceLogger(object):" } ]
from typing import Callable, Tuple, Union from openasce.discovery.causal_graph import CausalGraph from openasce.discovery.discovery import Discovery from openasce.discovery.search_discovery.search_strategy import Strategy from openasce.utils.logger import logger import numpy as np
5,918
# Copyright 2023 AntGroup CO., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. class CausalSearchDiscovery(Discovery): """Execute the causal inference by search method Attributes: """ def __init__(self) -> None: """Constructor Arguments: Returns: """ super().__init__() def fit(self, *, X: Union[np.ndarray, Callable], **kwargs) -> None: """Feed the sample data Arguments: X (num of samples, features or callable returning np.ndarray): samples Returns: """ self._data = X() if callable(X) else X if isinstance(self._data, np.ndarray): if self.node_names and len(self.node_names) == self._data.shape[1]: pass elif self.node_names: raise ValueError( f"The number of node does NOT match the column num of samples." ) else: logger.info( f"No node name specified. Use arbitrary names like x0, x1..." ) self.node_names = [f"x{i}" for i in range(self._data.shape[1])] elif isinstance(self._data, dict): self.node_names = self._data.get("node_names") self._data = self._data.get("data") elif isinstance(self._data, tuple): self.node_names = [self._data[0]] self._data = self._data[1] else: raise ValueError(f"No reasonal input data. {type(self._data)}")
# Copyright 2023 AntGroup CO., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. class CausalSearchDiscovery(Discovery): """Execute the causal inference by search method Attributes: """ def __init__(self) -> None: """Constructor Arguments: Returns: """ super().__init__() def fit(self, *, X: Union[np.ndarray, Callable], **kwargs) -> None: """Feed the sample data Arguments: X (num of samples, features or callable returning np.ndarray): samples Returns: """ self._data = X() if callable(X) else X if isinstance(self._data, np.ndarray): if self.node_names and len(self.node_names) == self._data.shape[1]: pass elif self.node_names: raise ValueError( f"The number of node does NOT match the column num of samples." ) else: logger.info( f"No node name specified. Use arbitrary names like x0, x1..." ) self.node_names = [f"x{i}" for i in range(self._data.shape[1])] elif isinstance(self._data, dict): self.node_names = self._data.get("node_names") self._data = self._data.get("data") elif isinstance(self._data, tuple): self.node_names = [self._data[0]] self._data = self._data[1] else: raise ValueError(f"No reasonal input data. {type(self._data)}")
strategy = Strategy(node_names=self.node_names, **kwargs)
2
2023-12-06 05:54:36+00:00
8k
8none1/idealLED
custom_components/ideal_led/config_flow.py
[ { "identifier": "IDEALLEDInstance", "path": "custom_components/ideal_led/idealled.py", "snippet": "class IDEALLEDInstance:\n def __init__(self, address, reset: bool, delay: int, hass) -> None:\n self.loop = asyncio.get_running_loop()\n self._mac = address\n self._reset = reset\n self._delay = delay\n self._hass = hass\n self._device: BLEDevice | None = None\n self._device = bluetooth.async_ble_device_from_address(self._hass, address)\n if not self._device:\n raise ConfigEntryNotReady(\n f\"You need to add bluetooth integration (https://www.home-assistant.io/integrations/bluetooth) or couldn't find a nearby device with address: {address}\"\n )\n self._connect_lock: asyncio.Lock = asyncio.Lock()\n self._client: BleakClientWithServiceCache | None = None\n self._disconnect_timer: asyncio.TimerHandle | None = None\n self._cached_services: BleakGATTServiceCollection | None = None\n self._expected_disconnect = False\n self._is_on = None\n self._rgb_color = None\n self._brightness = 255\n self._effect = None\n self._effect_speed = 0x64\n self._color_mode = ColorMode.RGB\n self._write_uuid = None\n self._write_colour_uuid = None\n self._read_uuid = None\n self._turn_on_cmd = None\n self._turn_off_cmd = None\n self._model = self._detect_model()\n self._on_update_callbacks = []\n \n LOGGER.debug(\n \"Model information for device %s : ModelNo %s. MAC: %s\",\n self._device.name,\n self._model,\n self._mac,\n )\n\n def _detect_model(self):\n x = 0\n for name in NAME_ARRAY:\n if self._device.name.lower().startswith(name.lower()): # TODO: match on BLE provided model instead of name\n return x\n x = x + 1\n\n async def _write(self, data: bytearray):\n \"\"\"Send command to device and read response.\"\"\"\n await self._ensure_connected()\n cipher = AES.new(SECRET_ENCRYPTION_KEY, AES.MODE_ECB)\n ciphered_data = cipher.encrypt(data)\n await self._write_while_connected(ciphered_data)\n\n async def _write_colour_data(self, data: bytearray):\n \"\"\"Send command to device and read response.\"\"\"\n await self._ensure_connected()\n await self._write_colour_while_connected(data)\n\n async def _write_while_connected(self, data: bytearray):\n LOGGER.debug(f\"Writing data to {self.name}: {data}\")\n await self._client.write_gatt_char(self._write_uuid, data, False)\n \n async def _write_colour_while_connected(self, data: bytearray):\n LOGGER.debug(f\"Writing colour data to {self.name}: {data}\")\n await self._client.write_gatt_char(self._write_colour_uuid, data, False)\n \n def _notification_handler(self, _sender: BleakGATTCharacteristic, data: bytearray) -> None:\n # This doesn't work. I can't get the controller to send notifications.\n \"\"\"Handle BLE notifications from the device. 
Update internal state to reflect the device state.\"\"\"\n LOGGER.debug(\"N: %s: Notification received\", self.name)\n #self.local_callback()\n\n\n @property\n def mac(self):\n return self._device.address\n\n @property\n def reset(self):\n return self._reset\n\n @property\n def name(self):\n return self._device.name\n\n @property\n def rssi(self):\n return self._device.rssi\n\n @property\n def is_on(self):\n return self._is_on\n\n @property\n def brightness(self):\n return self._brightness \n\n @property\n def rgb_color(self):\n return self._rgb_color\n\n @property\n def effect_list(self) -> list[str]:\n return EFFECT_LIST\n\n @property\n def effect(self):\n return self._effect\n \n @property\n def color_mode(self):\n return self._color_mode\n\n @retry_bluetooth_connection_error\n async def set_rgb_color(self, rgb: Tuple[int, int, int], brightness: int | None = None):\n # TODO: Add support for brightness\n self._rgb_color = rgb\n if brightness is None:\n if self._brightness is None:\n self._brightness = 255\n else:\n brightness = self._brightness\n brightness_percent = int(brightness * 100 / 255)\n # Now adjust the RBG values to match the brightness\n red = int(rgb[0] * brightness_percent / 100)\n green = int(rgb[1] * brightness_percent / 100)\n blue = int(rgb[2] * brightness_percent / 100)\n # RGB packet\n rgb_packet = bytearray.fromhex(\"0F 53 47 4C 53 00 00 64 50 1F 00 00 1F 00 00 32\")\n red = int(red >> 3) # You CAN send 8 bit colours to this thing, but you probably shouldn't for power reasons. Thanks to the good folks at Hacker News for that insight.\n green = int(green >> 3)\n blue = int(blue >> 3)\n rgb_packet[9] = red\n rgb_packet[12] = red\n rgb_packet[10] = green\n rgb_packet[13] = green\n rgb_packet[11] = blue\n rgb_packet[14] = blue\n await self._write(rgb_packet) \n\n\n @retry_bluetooth_connection_error\n # effect, reverse=0, speed=50, saturation=50, colour_data=COLOUR_DATA\n async def set_effect(self, effect: str, brightness: int | None = NotImplemented):\n if effect not in EFFECT_LIST:\n LOGGER.error(\"Effect %s not supported\", effect)\n return\n self._effect = effect\n effect_id = EFFECT_MAP.get(effect)\n if effect_id > 11: effect = 11\n packet = bytearray.fromhex(\"0A 4D 55 4C 54 08 00 64 50 07 32 00 00 00 00 00\")\n packet[5] = effect_id\n packet[6] = 0 # reverse\n packet[8] = 50 # speed\n packet[10] = 50 # saturation (brightness?)\n await self._write(packet)\n # Now we send the colour data\n await self.write_colour_data()\n \n @retry_bluetooth_connection_error\n async def write_colour_data(self):\n # This is sent after switching to an effect to tell the device what sort of pattern to show.\n # In the app you can edit this yourself, but HA doesn't have the UI for such a thing\n # so for now I'm just going to hardcode it to a rainbow pattern. 
You could change this to\n # whatever you want, but for an effect the maximum length is 7 colours.\n colour_list = []\n colour_divisions = int(360 / 7)\n for i in range(7):\n h = i * colour_divisions\n r, g, b = colorsys.hsv_to_rgb(h / 360, 1, 1)\n r = int(r * 255)\n g = int(g * 255)\n b = int(b * 255)\n colour_list.append((r, g, b))\n #print(f\"Colour list: {colour_list}\")\n length = len(colour_list)\n colour_data = []\n colour_data.append(length*3) # 3 bytes per colour\n colour_data.append(0) # Don't know what this is, perhaps just a separator\n for colour in colour_list:\n colour_data.append(colour[0])\n colour_data.append(colour[1])\n colour_data.append(colour[2])\n await self._write_colour_data(colour_data)\n\n\n @retry_bluetooth_connection_error\n async def turn_on(self):\n packet = bytearray.fromhex(\"05 54 55 52 4E 01 00 00 00 00 00 00 00 00 00 00\")\n packet[5] = 1\n await self._write(packet)\n self._is_on = True\n\n @retry_bluetooth_connection_error\n async def turn_off(self):\n packet = bytearray.fromhex(\"05 54 55 52 4E 01 00 00 00 00 00 00 00 00 00 00\")\n packet[5] = 0\n await self._write(packet)\n self._is_on = False\n\n @retry_bluetooth_connection_error\n async def update(self):\n LOGGER.debug(\"%s: Update in lwdnetwf called\", self.name)\n try:\n await self._ensure_connected()\n self._is_on = False\n except Exception as error:\n self._is_on = None # failed to connect, this should mark it as unavailable\n LOGGER.error(\"Error getting status: %s\", error)\n track = traceback.format_exc()\n LOGGER.debug(track)\n\n async def _ensure_connected(self) -> None:\n \"\"\"Ensure connection to device is established.\"\"\"\n if self._connect_lock.locked():\n LOGGER.debug(\n \"%s: Connection already in progress, waiting for it to complete\",\n self.name,\n )\n if self._client and self._client.is_connected:\n self._reset_disconnect_timer()\n return\n async with self._connect_lock:\n # Check again while holding the lock\n if self._client and self._client.is_connected:\n self._reset_disconnect_timer()\n return\n LOGGER.debug(\"%s: Connecting\", self.name)\n client = await establish_connection(\n BleakClientWithServiceCache,\n self._device,\n self.name,\n self._disconnected,\n cached_services=self._cached_services,\n ble_device_callback=lambda: self._device,\n )\n LOGGER.debug(\"%s: Connected\", self.name)\n resolved = self._resolve_characteristics(client.services)\n if not resolved:\n # Try to handle services failing to load\n resolved = self._resolve_characteristics(await client.get_services())\n self._cached_services = client.services if resolved else None\n\n self._client = client\n self._reset_disconnect_timer()\n\n # Subscribe to notification is needed for LEDnetWF devices to accept commands\n self._notification_callback = self._notification_handler\n await client.start_notify(self._read_uuid, self._notification_callback)\n LOGGER.debug(\"%s: Subscribed to notifications\", self.name)\n\n\n def _resolve_characteristics(self, services: BleakGATTServiceCollection) -> bool:\n \"\"\"Resolve characteristics.\"\"\"\n for characteristic in NOTIFY_CHARACTERISTIC_UUIDS:\n if char := services.get_characteristic(characteristic):\n self._read_uuid = char\n LOGGER.debug(\"%s: Read UUID: %s\", self.name, self._read_uuid)\n break\n for characteristic in WRITE_CMD_CHARACTERISTIC_UUIDS:\n if char := services.get_characteristic(characteristic):\n self._write_uuid = char\n LOGGER.debug(\"%s: Write UUID: %s\", self.name, self._write_uuid)\n break\n for characteristic in WRITE_COL_CHARACTERISTIC_UUIDS:\n 
if char := services.get_characteristic(characteristic):\n self._write_colour_uuid = char\n LOGGER.debug(\"%s: Write colour UUID: %s\", self.name, self._write_colour_uuid)\n break\n return bool(self._read_uuid and self._write_uuid and self._write_colour_uuid)\n\n def _reset_disconnect_timer(self) -> None:\n \"\"\"Reset disconnect timer.\"\"\"\n if self._disconnect_timer:\n self._disconnect_timer.cancel()\n self._expected_disconnect = False\n if self._delay is not None and self._delay != 0:\n LOGGER.debug(\n \"%s: Configured disconnect from device in %s seconds\",\n self.name,\n self._delay\n )\n self._disconnect_timer = self.loop.call_later(self._delay, self._disconnect)\n\n def _disconnected(self, client: BleakClientWithServiceCache) -> None:\n \"\"\"Disconnected callback.\"\"\"\n if self._expected_disconnect:\n LOGGER.debug(\"%s: Disconnected from device\", self.name)\n return\n LOGGER.warning(\"%s: Device unexpectedly disconnected\", self.name)\n\n def _disconnect(self) -> None:\n \"\"\"Disconnect from device.\"\"\"\n self._disconnect_timer = None\n asyncio.create_task(self._execute_timed_disconnect())\n\n async def stop(self) -> None:\n \"\"\"Stop the LEDBLE.\"\"\"\n LOGGER.debug(\"%s: Stop\", self.name)\n await self._execute_disconnect()\n\n async def _execute_timed_disconnect(self) -> None:\n \"\"\"Execute timed disconnection.\"\"\"\n LOGGER.debug(\n \"%s: Disconnecting after timeout of %s\",\n self.name,\n self._delay\n )\n await self._execute_disconnect()\n\n async def _execute_disconnect(self) -> None:\n \"\"\"Execute disconnection.\"\"\"\n async with self._connect_lock:\n read_char = self._read_uuid\n client = self._client\n self._expected_disconnect = True\n self._client = None\n self._write_uuid = None\n self._read_uuid = None\n if client and client.is_connected:\n await client.stop_notify(read_char) # TODO: I don't think this is needed. Bleak docs say it isnt.\n await client.disconnect()\n LOGGER.debug(\"%s: Disconnected\", self.name)\n \n def local_callback(self):\n # Placeholder to be replaced by a call from light.py\n # I can't work out how to plumb a callback from here to light.py\n return" }, { "identifier": "DOMAIN", "path": "custom_components/ideal_led/const.py", "snippet": "DOMAIN = \"ideal_led\"" }, { "identifier": "CONF_RESET", "path": "custom_components/ideal_led/const.py", "snippet": "CONF_RESET = \"reset\"" }, { "identifier": "CONF_DELAY", "path": "custom_components/ideal_led/const.py", "snippet": "CONF_DELAY = \"delay\"" } ]
import asyncio import voluptuous as vol import logging from .idealled import IDEALLEDInstance from typing import Any from bluetooth_data_tools import human_readable_name from homeassistant import config_entries from homeassistant.const import CONF_MAC from homeassistant.helpers.device_registry import format_mac from homeassistant.data_entry_flow import FlowResult from homeassistant.core import callback from homeassistant.components.bluetooth import ( BluetoothServiceInfoBleak, async_discovered_service_info, ) from bluetooth_sensor_state_data import BluetoothData from home_assistant_bluetooth import BluetoothServiceInfo from .const import DOMAIN, CONF_RESET, CONF_DELAY
4,956
"""Confirm discovery.""" LOGGER.debug("Discovered bluetooth devices, step bluetooth confirm, : %s", user_input) self._set_confirm_only() return await self.async_step_user() async def async_step_user( self, user_input: dict[str, Any] | None = None ) -> FlowResult: """Handle the user step to pick discovered device.""" if user_input is not None: self.mac = user_input[CONF_MAC] self.name = self.context["title_placeholders"]["name"] await self.async_set_unique_id(self.mac, raise_on_progress=False) self._abort_if_unique_id_configured() return await self.async_step_validate() current_addresses = self._async_current_ids() for discovery_info in async_discovered_service_info(self.hass): self.mac = discovery_info.address if self.mac in current_addresses: LOGGER.debug("Device %s in current_addresses", (self.mac)) continue if (device for device in self._discovered_devices if device.address == self.mac) == ([]): LOGGER.debug("Device %s in discovered_devices", (device)) continue device = DeviceData(discovery_info) if device.supported(): self._discovered_devices.append(device) if not self._discovered_devices: return await self.async_step_manual() LOGGER.debug("Discovered supported devices: %s - %s", self._discovered_devices[0].name(), self._discovered_devices[0].address()) mac_dict = { dev.address(): dev.name() for dev in self._discovered_devices } return self.async_show_form( step_id="user", data_schema=vol.Schema( { vol.Required(CONF_MAC): vol.In(mac_dict), } ), errors={}) async def async_step_validate(self, user_input: "dict[str, Any] | None" = None): if user_input is not None: if "flicker" in user_input: if user_input["flicker"]: return self.async_create_entry(title=self.name, data={CONF_MAC: self.mac, "name": self.name}) return self.async_abort(reason="cannot_validate") if "retry" in user_input and not user_input["retry"]: return self.async_abort(reason="cannot_connect") error = await self.toggle_light() if error: return self.async_show_form( step_id="validate", data_schema=vol.Schema( { vol.Required("retry"): bool } ), errors={"base": "connect"}) return self.async_show_form( step_id="validate", data_schema=vol.Schema( { vol.Required("flicker"): bool } ), errors={}) async def async_step_manual(self, user_input: "dict[str, Any] | None" = None): if user_input is not None: self.mac = user_input[CONF_MAC] self.name = user_input["name"] await self.async_set_unique_id(format_mac(self.mac)) return await self.async_step_validate() return self.async_show_form( step_id="manual", data_schema=vol.Schema( { vol.Required(CONF_MAC): str, vol.Required("name"): str } ), errors={}) async def toggle_light(self): if not self._instance: self._instance = IDEALLEDInstance(self.mac, False, 120, self.hass) try: await self._instance.update() await self._instance.turn_on() await asyncio.sleep(1) await self._instance.turn_off() await asyncio.sleep(1) await self._instance.turn_on() await asyncio.sleep(1) await self._instance.turn_off() except (Exception) as error: return error finally: await self._instance.stop() @staticmethod @callback def async_get_options_flow(entry: config_entries.ConfigEntry): return OptionsFlowHandler(entry) class OptionsFlowHandler(config_entries.OptionsFlow): def __init__(self, config_entry): """Initialize options flow.""" self.config_entry = config_entry async def async_step_init(self, _user_input=None): """Manage the options.""" return await self.async_step_user() async def async_step_user(self, user_input=None): """Handle a flow initialized by the user.""" errors = {}
LOGGER = logging.getLogger(__name__) DATA_SCHEMA = vol.Schema({("host"): str}) class DeviceData(BluetoothData): def __init__(self, discovery_info) -> None: self._discovery = discovery_info LOGGER.debug("Discovered bluetooth devices, DeviceData, : %s , %s", self._discovery.address, self._discovery.name) def supported(self): return self._discovery.name.lower().startswith("isp-") def address(self): return self._discovery.address def get_device_name(self): return human_readable_name(None, self._discovery.name, self._discovery.address) def name(self): return human_readable_name(None, self._discovery.name, self._discovery.address) def rssi(self): return self._discovery.rssi def _start_update(self, service_info: BluetoothServiceInfo) -> None: """Update from BLE advertisement data.""" LOGGER.debug("Parsing BLE advertisement data: %s", service_info) class BJLEDFlowHandler(config_entries.ConfigFlow, domain=DOMAIN): VERSION = 1 CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL def __init__(self) -> None: self.mac = None self._device = None self._instance = None self.name = None self._discovery_info: BluetoothServiceInfoBleak | None = None self._discovered_device: DeviceData | None = None self._discovered_devices = [] async def async_step_bluetooth( self, discovery_info: BluetoothServiceInfoBleak ) -> FlowResult: """Handle the bluetooth discovery step.""" LOGGER.debug("Discovered bluetooth devices, step bluetooth, : %s , %s", discovery_info.address, discovery_info.name) await self.async_set_unique_id(discovery_info.address) self._abort_if_unique_id_configured() device = DeviceData(discovery_info) self.context["title_placeholders"] = {"name": device.name()} if device.supported(): self._discovered_devices.append(device) return await self.async_step_bluetooth_confirm() else: return self.async_abort(reason="not_supported") async def async_step_bluetooth_confirm( self, user_input: dict[str, Any] | None = None ) -> FlowResult: """Confirm discovery.""" LOGGER.debug("Discovered bluetooth devices, step bluetooth confirm, : %s", user_input) self._set_confirm_only() return await self.async_step_user() async def async_step_user( self, user_input: dict[str, Any] | None = None ) -> FlowResult: """Handle the user step to pick discovered device.""" if user_input is not None: self.mac = user_input[CONF_MAC] self.name = self.context["title_placeholders"]["name"] await self.async_set_unique_id(self.mac, raise_on_progress=False) self._abort_if_unique_id_configured() return await self.async_step_validate() current_addresses = self._async_current_ids() for discovery_info in async_discovered_service_info(self.hass): self.mac = discovery_info.address if self.mac in current_addresses: LOGGER.debug("Device %s in current_addresses", (self.mac)) continue if (device for device in self._discovered_devices if device.address == self.mac) == ([]): LOGGER.debug("Device %s in discovered_devices", (device)) continue device = DeviceData(discovery_info) if device.supported(): self._discovered_devices.append(device) if not self._discovered_devices: return await self.async_step_manual() LOGGER.debug("Discovered supported devices: %s - %s", self._discovered_devices[0].name(), self._discovered_devices[0].address()) mac_dict = { dev.address(): dev.name() for dev in self._discovered_devices } return self.async_show_form( step_id="user", data_schema=vol.Schema( { vol.Required(CONF_MAC): vol.In(mac_dict), } ), errors={}) async def async_step_validate(self, user_input: "dict[str, Any] | None" = None): if user_input is not None: if "flicker" 
in user_input: if user_input["flicker"]: return self.async_create_entry(title=self.name, data={CONF_MAC: self.mac, "name": self.name}) return self.async_abort(reason="cannot_validate") if "retry" in user_input and not user_input["retry"]: return self.async_abort(reason="cannot_connect") error = await self.toggle_light() if error: return self.async_show_form( step_id="validate", data_schema=vol.Schema( { vol.Required("retry"): bool } ), errors={"base": "connect"}) return self.async_show_form( step_id="validate", data_schema=vol.Schema( { vol.Required("flicker"): bool } ), errors={}) async def async_step_manual(self, user_input: "dict[str, Any] | None" = None): if user_input is not None: self.mac = user_input[CONF_MAC] self.name = user_input["name"] await self.async_set_unique_id(format_mac(self.mac)) return await self.async_step_validate() return self.async_show_form( step_id="manual", data_schema=vol.Schema( { vol.Required(CONF_MAC): str, vol.Required("name"): str } ), errors={}) async def toggle_light(self): if not self._instance: self._instance = IDEALLEDInstance(self.mac, False, 120, self.hass) try: await self._instance.update() await self._instance.turn_on() await asyncio.sleep(1) await self._instance.turn_off() await asyncio.sleep(1) await self._instance.turn_on() await asyncio.sleep(1) await self._instance.turn_off() except (Exception) as error: return error finally: await self._instance.stop() @staticmethod @callback def async_get_options_flow(entry: config_entries.ConfigEntry): return OptionsFlowHandler(entry) class OptionsFlowHandler(config_entries.OptionsFlow): def __init__(self, config_entry): """Initialize options flow.""" self.config_entry = config_entry async def async_step_init(self, _user_input=None): """Manage the options.""" return await self.async_step_user() async def async_step_user(self, user_input=None): """Handle a flow initialized by the user.""" errors = {}
options = self.config_entry.options or {CONF_RESET: False,CONF_DELAY: 120}
2
2023-12-14 08:01:32+00:00
8k
amirzandieh/HyperAttention
hyper_attention.py
[ { "identifier": "add_self_attentions", "path": "src/attn_utils.py", "snippet": "def add_self_attentions(attn1, lse1, attn2, lse2):\n \"\"\"\n inputs:\n - attn1, attn2: 4d-tensors with shape [b, h, n, d]\n - lse1, lse2: 4d-tensors of log-sum-exp with shape [b, h, n, 1]\n output:\n - attn\n = (attn1 * exp(lse1) + attn2 * exp(lse2)) / (exp(lse1) + exp(lse2))\n = (attn1 + attn2 * exp(lse2 - lse1)) / (1 + exp(lse2-lse1))\n = attn1 * c + attn2 * (1-c), where c=1/(1 + exp(lse2-lse1)),\n - lse\n = log(exp(lse1) + exp(lse2))\n = log(exp(lse1) * (1 + exp(lse2 - lse1)))\n = lse1 + log(1 + exp(lse2 - lse1)) = lse1 - log(c)\n \"\"\"\n c = (1 / (1 + (lse2 - lse1).exp())).to(dtype=attn1.dtype)\n attn = c * attn1 + (1-c) * attn2\n lse = lse1 - (c + torch.finfo(lse1.dtype).eps).log()\n return attn, lse" }, { "identifier": "flash_attn_func", "path": "src/flash_attn_triton.py", "snippet": "def _fwd_kernel(\n Q,\n K,\n V,\n Bias,\n Out,\n Lse,\n softmax_scale,\n stride_qb,\n stride_qh,\n stride_qm,\n stride_kb,\n stride_kh,\n stride_kn,\n stride_vb,\n stride_vh,\n stride_vn,\n stride_bb,\n stride_bh,\n stride_bm,\n stride_ob,\n stride_oh,\n stride_om,\n nheads,\n seqlen_q,\n seqlen_k,\n seqlen_q_rounded,\n headdim,\n CACHE_KEY_SEQLEN_Q,\n CACHE_KEY_SEQLEN_K,\n BIAS_TYPE: tl.constexpr,\n IS_CAUSAL: tl.constexpr,\n BLOCK_HEADDIM: tl.constexpr,\n EVEN_M: tl.constexpr,\n EVEN_N: tl.constexpr,\n EVEN_HEADDIM: tl.constexpr,\n BLOCK_M: tl.constexpr,\n BLOCK_N: tl.constexpr,\n):\ndef _bwd_preprocess_do_o_dot(\n Out,\n DO,\n Delta,\n stride_ob,\n stride_oh,\n stride_om,\n stride_dob,\n stride_doh,\n stride_dom,\n nheads,\n seqlen_q,\n seqlen_q_rounded,\n headdim,\n BLOCK_M: tl.constexpr,\n BLOCK_HEADDIM: tl.constexpr,\n):\ndef _bwd_store_dx(\n dx_ptrs,\n dx,\n offs_n,\n offs_d,\n seqlen,\n headdim,\n EVEN_M: tl.constexpr,\n EVEN_N: tl.constexpr,\n even_headdim,\n):\ndef _bwd_kernel_one_col_block(\n start_n,\n Q,\n K,\n V,\n Bias,\n DO,\n DQ,\n DK,\n DV,\n LSE,\n D,\n softmax_scale,\n stride_qm,\n stride_kn,\n stride_vn,\n stride_bm,\n stride_dom,\n stride_dqm,\n stride_dkn,\n stride_dvn,\n seqlen_q,\n seqlen_k,\n headdim,\n ATOMIC_ADD: tl.constexpr,\n BIAS_TYPE: tl.constexpr,\n IS_CAUSAL: tl.constexpr,\n BLOCK_HEADDIM: tl.constexpr,\n EVEN_M: tl.constexpr,\n EVEN_N: tl.constexpr,\n EVEN_HEADDIM: tl.constexpr,\n BLOCK_M: tl.constexpr,\n BLOCK_N: tl.constexpr,\n):\ndef init_to_zero(name):\ndef _bwd_kernel(\n Q,\n K,\n V,\n Bias,\n DO,\n DQ,\n DK,\n DV,\n LSE,\n D,\n softmax_scale,\n stride_qb,\n stride_qh,\n stride_qm,\n stride_kb,\n stride_kh,\n stride_kn,\n stride_vb,\n stride_vh,\n stride_vn,\n stride_bb,\n stride_bh,\n stride_bm,\n stride_dob,\n stride_doh,\n stride_dom,\n stride_dqb,\n stride_dqh,\n stride_dqm,\n stride_dkb,\n stride_dkh,\n stride_dkn,\n stride_dvb,\n stride_dvh,\n stride_dvn,\n nheads,\n seqlen_q,\n seqlen_k,\n seqlen_q_rounded,\n headdim,\n CACHE_KEY_SEQLEN_Q,\n CACHE_KEY_SEQLEN_K,\n BIAS_TYPE: tl.constexpr,\n IS_CAUSAL: tl.constexpr,\n BLOCK_HEADDIM: tl.constexpr,\n SEQUENCE_PARALLEL: tl.constexpr,\n EVEN_M: tl.constexpr,\n EVEN_N: tl.constexpr,\n EVEN_HEADDIM: tl.constexpr,\n BLOCK_M: tl.constexpr,\n BLOCK_N: tl.constexpr,\n):\ndef _flash_attn_forward(q, k, v, bias=None, causal=False, softmax_scale=None):\ndef _flash_attn_backward(\n do, q, k, v, o, lse, dq, dk, dv, bias=None, causal=False, softmax_scale=None\n):\n def forward(ctx, q, k, v, bias=None, causal=False, softmax_scale=None):\n def backward(ctx, do, dlse_use_needed=None):\n BLOCK_HEADDIM = max(triton.next_power_of_2(d), 16)\n BLOCK 
= 128\n BLOCK_HEADDIM = max(triton.next_power_of_2(d), 16)\nclass FlashAttnFunc(torch.autograd.Function):" }, { "identifier": "hyper_attn_func", "path": "src/hyper_attn_triton.py", "snippet": "def _fwd_hyper_kernel(\n Q,\n K,\n V,\n q_sort_idx,\n k_sort_idx,\n Out,\n Lse,\n softmax_scale,\n stride_qb,\n stride_qh,\n stride_qm,\n stride_kb,\n stride_kh,\n stride_kn,\n stride_vb,\n stride_vh,\n stride_vn,\n stride_q_sort_idxb,\n stride_q_sort_idxh,\n stride_q_sort_idxm,\n stride_k_sort_idxb,\n stride_k_sort_idxh,\n stride_k_sort_idxn,\n stride_ob,\n stride_oh,\n stride_om,\n nheads,\n block_size,\n sample_size,\n seqlen_k,\n seqlen_q,\n headdim,\n v_headdim,\n smooth_block,\n CACHE_KEY_SEQLEN_Q,\n CACHE_KEY_SEQLEN_K,\n BLOCK_HEADDIM: tl.constexpr,\n V_BLOCK_HEADDIM: tl.constexpr,\n EVEN_HEADDIM: tl.constexpr,\n EVEN_V_HEADDIM: tl.constexpr,\n BLOCK_M: tl.constexpr,\n BLOCK_N: tl.constexpr,\n):\ndef _bwd_preprocess_do_o_dot(\n Out,\n DO,\n Delta,\n stride_ob,\n stride_oh,\n stride_om,\n stride_dob,\n stride_doh,\n stride_dom,\n nheads,\n seqlen_q,\n v_headdim,\n BLOCK_M: tl.constexpr,\n V_BLOCK_HEADDIM: tl.constexpr,\n):\ndef _bwd_store_dx(\n dx_ptrs,\n dx,\n offs_d,\n headdim,\n even_headdim,\n):\ndef _bwd_blocked_kernel_one_col(\n start_n,\n Q,\n K,\n V,\n Q_idx,\n K_idx,\n DO,\n DQ,\n DK,\n DV,\n LSE,\n D,\n softmax_scale,\n stride_qm,\n stride_kn,\n stride_vn,\n stride_dom,\n stride_dqm,\n stride_dkn,\n stride_dvn,\n stride_q_idxm,\n stride_k_idxn,\n seqlen_q,\n block_size,\n headdim,\n v_headdim,\n smooth_block,\n BLOCK_HEADDIM: tl.constexpr,\n V_BLOCK_HEADDIM: tl.constexpr,\n EVEN_HEADDIM: tl.constexpr,\n EVEN_V_HEADDIM: tl.constexpr,\n BLOCK_M: tl.constexpr,\n BLOCK_N: tl.constexpr,\n):\ndef _bwd_permuted_block_diagonal_kernel(\n Q,\n K,\n V,\n q_sort_idx,\n k_sort_idx,\n DO,\n DQ,\n DK,\n DV,\n LSE,\n D,\n softmax_scale,\n stride_qb,\n stride_qh,\n stride_qm,\n stride_kb,\n stride_kh,\n stride_kn,\n stride_vb,\n stride_vh,\n stride_vn,\n stride_q_sort_idxb,\n stride_q_sort_idxh,\n stride_q_sort_idxm,\n stride_k_sort_idxb,\n stride_k_sort_idxh,\n stride_k_sort_idxn,\n stride_dob,\n stride_doh,\n stride_dom,\n stride_dqb,\n stride_dqh,\n stride_dqm,\n stride_dkb,\n stride_dkh,\n stride_dkn,\n stride_dvb,\n stride_dvh,\n stride_dvn,\n nheads,\n seqlen_q,\n block_size,\n headdim,\n v_headdim,\n smooth_block,\n CACHE_KEY_SEQLEN_Q,\n CACHE_KEY_SEQLEN_K,\n BLOCK_HEADDIM: tl.constexpr,\n V_BLOCK_HEADDIM: tl.constexpr,\n EVEN_HEADDIM: tl.constexpr,\n EVEN_V_HEADDIM: tl.constexpr,\n BLOCK_M: tl.constexpr,\n BLOCK_N: tl.constexpr,\n):\ndef _bwd_sampled_col_kernel(\n Q,\n K,\n V,\n DO,\n DQ,\n DK,\n DV,\n LSE,\n D,\n softmax_scale,\n stride_qb,\n stride_qh,\n stride_qm,\n stride_kb,\n stride_kh,\n stride_kn,\n stride_vb,\n stride_vh,\n stride_vn,\n stride_dob,\n stride_doh,\n stride_dom,\n stride_dqb,\n stride_dqh,\n stride_dqm,\n stride_dkb,\n stride_dkh,\n stride_dkn,\n stride_dvb,\n stride_dvh,\n stride_dvn,\n nheads,\n seqlen_q,\n headdim,\n v_headdim,\n CACHE_KEY_SEQLEN_Q,\n CACHE_KEY_SEQLEN_K,\n BLOCK_HEADDIM: tl.constexpr,\n V_BLOCK_HEADDIM: tl.constexpr,\n EVEN_HEADDIM: tl.constexpr,\n EVEN_V_HEADDIM: tl.constexpr,\n BLOCK_M: tl.constexpr,\n BLOCK_N: tl.constexpr,\n):\ndef _hyper_attn_forward(q, k, v, q_sort_idx, k_sort_idx, block_size, sample_size, softmax_scale=None,\n smooth_block=False):\ndef _hyper_attn_backward(\n do, q, k, v, q_sort_idx, k_sort_idx, o, lse, dq, dk, dv, block_size, sample_size, softmax_scale=None,\n smooth_block=False):\n def forward(ctx, q, k, v, q_sort_idx, 
k_sort_idx, block_size, sample_size=0, softmax_scale=None,\n smooth_block=False):\n def backward(ctx, do, dlse_use_needed=None):\n BLOCK_HEADDIM = max(triton.next_power_of_2(d), 16)\n V_BLOCK_HEADDIM = max(triton.next_power_of_2(v_headdim), 16)\n BLOCK = 128\n V_BLOCK_HEADDIM = max(triton.next_power_of_2(v_headdim), 16)\n BLOCK_HEADDIM = max(triton.next_power_of_2(d), 16)\n BLOCK = 128\nclass HyperAttnFunc(torch.autograd.Function):" }, { "identifier": "AngularLSHTriton", "path": "src/angular_lsh_triton.py", "snippet": "class AngularLSHTriton(torch.nn.Module):\n \"\"\"\n inputs:\n - num_projs: a positive integer that determines the number of random projections used by hash function\n - dim: positive integer that determines the dimension of input vectors\n - mat: a tensor whose last shape is equal to dim and gets hashed by the lsh function\n output:\n - buckets: a tensor with shape mat.shape[:-1] and each entry is an integer in [0, 2^num_proj - 1]\n \"\"\"\n def __init__(self, num_projs, dim, rng=None):\n super().__init__()\n self.num_projs = num_projs\n\n if num_projs > 0:\n self.register_buffer('perm', self._unit_hamming_distance_array(self.num_projs), persistent=False)\n self.register_buffer('proj_dir', torch.randn(dim + (num_projs,), generator=rng), persistent=False)\n self.register_buffer('enc_vec', 2 ** torch.arange(self.num_projs).view(1, 1, 1, -1), persistent=False)\n else:\n raise ValueError(\"Invalid value for num_projs\")\n\n def _unit_hamming_distance_array(self, size_n):\n if size_n == 1:\n return torch.tensor([0, 1], dtype=torch.int32)\n a = self._unit_hamming_distance_array(size_n - 1)\n b = torch.concat([a, torch.flip(a, dims=[0]) + 2 ** (size_n - 1)], 0)\n return b if b.stride(-1) == 1 else b.contiguous()\n\n def hash_torch(self, mat):\n mask = torch.einsum('...nd,...dr -> ...nr', mat, self.proj_dir)\n mask = mask > 0\n bin_ids = (mask * self.enc_vec).sum(-1)\n return self.perm[bin_ids]\n\n def hash_triton(self, mat):\n return _angular_lsh(mat, self.proj_dir, self.perm, self.enc_vec)\n\n def __repr__(self):\n return f\"AngularLSH(num_proj={self.num_projs}, proj_dir.shape={self.proj_dir.shape})\"" } ]
import torch from src.attn_utils import add_self_attentions from src.flash_attn_triton import flash_attn_func from src.hyper_attn_triton import hyper_attn_func from src.angular_lsh_triton import AngularLSHTriton
4,569
class HyperAttention(torch.nn.Module): def __init__(self, input_dim=64, lsh_num_projs=8, block_size=256, sample_size=256, min_seq_len=2048, smooth_block=False, **kwargs): """ - block_size and sample_size must be divisible by 128 """ super().__init__() self.input_dim = input_dim self.lsh_num_projs = lsh_num_projs self.block_size = block_size self.sample_size = sample_size self.min_seq_len = min_seq_len self.smooth_block = smooth_block self.lsh = AngularLSHTriton(num_projs=self.lsh_num_projs, dim=(1, 1, input_dim)) def forward(self, query: torch.tensor, key: torch.tensor, value: torch.tensor, scale=None, causal=False, return_lse=False): """ Forward function for HyperAttention. If no causal masking, simply invokes forward_no_causal_mask method. If there is causal masking, it partitions the attention matrix and recurses on the partitions. inputs: - query, key, and valu: must have same sequence lengths but dimension of values vectors can be different from that of query or key - sequence lengths must be divisible by block_size output: - attn: (approximation of) the final attention output tensor - lse: (approximation of) log sum exp of the qk matrix """ query = query.contiguous() key = key.contiguous() value = value.contiguous() n_query = query.shape[2] batch_size, n_heads, n_key, dim = key.shape scale = scale or dim ** (-0.5) assert n_query == n_key # without causal masking if causal is False: attn, lse = self.forward_no_causal_mask(query, key, value, scale) else: # with causal masking if n_key <= self.min_seq_len: attn, lse = flash_attn_func(query.transpose(1, 2), key.transpose(1, 2), value.transpose(1, 2), None, True, scale) attn = attn.transpose(1, 2) else: # If n_query is odd we pad inputs by zero rows if n_query % 2: query = torch.nn.functional.pad(query, (0, 0, 0, 1), mode='constant', value=0.) key = torch.nn.functional.pad(key, (0, 0, 0, 1), mode='constant', value=0.) value = torch.nn.functional.pad(value, (0, 0, 0, 1), mode='constant', value=0.) # extract block diagonal parts q_bd = query.view(batch_size, 2 * n_heads, query.shape[2] // 2, query.shape[-1]) k_bd = key.view(batch_size, 2 * n_heads, key.shape[2] // 2, key.shape[-1]) v_bd = value.view(batch_size, 2 * n_heads, key.shape[2] // 2, value.shape[-1]) attn_bd, lse_bd = self.forward(q_bd, k_bd, v_bd, scale, True, True) if attn_bd.shape[2] not in attn_bd.stride(): attn_bd = attn_bd.contiguous() attn_bd = attn_bd.view(batch_size, n_heads, -1, dim) if lse_bd.shape[2] not in lse_bd.stride(): lse_bd = lse_bd.contiguous() lse_bd = lse_bd.view(batch_size, n_heads, -1, 1) # lowe diagonal block is an unmasked attention attn_unmasked, lse_unmasked = self.forward_no_causal_mask( query[:, :, key.shape[2] // 2:, :], key[:, :, :key.shape[2] // 2, :], value[:, :, :key.shape[2] // 2, :], scale) attn_up, lse_up = attn_bd[:, :, :query.shape[2] // 2, :], lse_bd[:, :, :query.shape[2] // 2, :]
class HyperAttention(torch.nn.Module): def __init__(self, input_dim=64, lsh_num_projs=8, block_size=256, sample_size=256, min_seq_len=2048, smooth_block=False, **kwargs): """ - block_size and sample_size must be divisible by 128 """ super().__init__() self.input_dim = input_dim self.lsh_num_projs = lsh_num_projs self.block_size = block_size self.sample_size = sample_size self.min_seq_len = min_seq_len self.smooth_block = smooth_block self.lsh = AngularLSHTriton(num_projs=self.lsh_num_projs, dim=(1, 1, input_dim)) def forward(self, query: torch.tensor, key: torch.tensor, value: torch.tensor, scale=None, causal=False, return_lse=False): """ Forward function for HyperAttention. If no causal masking, simply invokes forward_no_causal_mask method. If there is causal masking, it partitions the attention matrix and recurses on the partitions. inputs: - query, key, and valu: must have same sequence lengths but dimension of values vectors can be different from that of query or key - sequence lengths must be divisible by block_size output: - attn: (approximation of) the final attention output tensor - lse: (approximation of) log sum exp of the qk matrix """ query = query.contiguous() key = key.contiguous() value = value.contiguous() n_query = query.shape[2] batch_size, n_heads, n_key, dim = key.shape scale = scale or dim ** (-0.5) assert n_query == n_key # without causal masking if causal is False: attn, lse = self.forward_no_causal_mask(query, key, value, scale) else: # with causal masking if n_key <= self.min_seq_len: attn, lse = flash_attn_func(query.transpose(1, 2), key.transpose(1, 2), value.transpose(1, 2), None, True, scale) attn = attn.transpose(1, 2) else: # If n_query is odd we pad inputs by zero rows if n_query % 2: query = torch.nn.functional.pad(query, (0, 0, 0, 1), mode='constant', value=0.) key = torch.nn.functional.pad(key, (0, 0, 0, 1), mode='constant', value=0.) value = torch.nn.functional.pad(value, (0, 0, 0, 1), mode='constant', value=0.) # extract block diagonal parts q_bd = query.view(batch_size, 2 * n_heads, query.shape[2] // 2, query.shape[-1]) k_bd = key.view(batch_size, 2 * n_heads, key.shape[2] // 2, key.shape[-1]) v_bd = value.view(batch_size, 2 * n_heads, key.shape[2] // 2, value.shape[-1]) attn_bd, lse_bd = self.forward(q_bd, k_bd, v_bd, scale, True, True) if attn_bd.shape[2] not in attn_bd.stride(): attn_bd = attn_bd.contiguous() attn_bd = attn_bd.view(batch_size, n_heads, -1, dim) if lse_bd.shape[2] not in lse_bd.stride(): lse_bd = lse_bd.contiguous() lse_bd = lse_bd.view(batch_size, n_heads, -1, 1) # lowe diagonal block is an unmasked attention attn_unmasked, lse_unmasked = self.forward_no_causal_mask( query[:, :, key.shape[2] // 2:, :], key[:, :, :key.shape[2] // 2, :], value[:, :, :key.shape[2] // 2, :], scale) attn_up, lse_up = attn_bd[:, :, :query.shape[2] // 2, :], lse_bd[:, :, :query.shape[2] // 2, :]
attn_down, lse_down = add_self_attentions(attn_bd[:, :, query.shape[2] // 2:, :],
0
2023-12-08 21:28:22+00:00
8k
Psivant/femto
femto/fe/tests/septop/test_runner.py
[ { "identifier": "_prepare_complex_phase", "path": "femto/fe/septop/_runner.py", "snippet": "@femto.md.utils.mpi.run_on_rank_zero\ndef _prepare_complex_phase(\n config: \"femto.fe.septop.SepTopPhaseConfig\",\n ligand_1_coords: pathlib.Path,\n ligand_1_params: pathlib.Path,\n ligand_2_coords: pathlib.Path | None,\n ligand_2_params: pathlib.Path | None,\n receptor_coords: pathlib.Path,\n receptor_params: pathlib.Path | None,\n ligand_1_ref_atoms: tuple[str, str, str] | None = None,\n ligand_2_ref_atoms: tuple[str, str, str] | None = None,\n receptor_ref_atoms: tuple[str, str, str] | None = None,\n) -> tuple[parmed.Structure, openmm.System]:\n import femto.fe.septop\n\n receptor = femto.md.system.load_receptor(\n receptor_coords,\n receptor_params,\n config.setup.solvent.tleap_sources,\n )\n\n ligand_1, ligand_2 = femto.md.system.load_ligands(\n ligand_1_coords, ligand_1_params, ligand_2_coords, ligand_2_params\n )\n\n return femto.fe.septop.setup_complex(\n config.setup,\n receptor,\n ligand_1,\n ligand_2,\n receptor_ref_atoms,\n ligand_1_ref_atoms,\n ligand_2_ref_atoms,\n )" }, { "identifier": "_prepare_solution_phase", "path": "femto/fe/septop/_runner.py", "snippet": "@femto.md.utils.mpi.run_on_rank_zero\ndef _prepare_solution_phase(\n config: \"femto.fe.septop.SepTopPhaseConfig\",\n ligand_1_coords: pathlib.Path,\n ligand_1_params: pathlib.Path,\n ligand_2_coords: pathlib.Path | None,\n ligand_2_params: pathlib.Path | None,\n ligand_1_ref_atoms: tuple[str, str, str] | None = None,\n ligand_2_ref_atoms: tuple[str, str, str] | None = None,\n) -> tuple[parmed.Structure, openmm.System]:\n ligand_1, ligand_2 = femto.md.system.load_ligands(\n ligand_1_coords, ligand_1_params, ligand_2_coords, ligand_2_params\n )\n return femto.fe.septop._setup.setup_solution(\n config.setup, ligand_1, ligand_2, ligand_1_ref_atoms, ligand_2_ref_atoms\n )" }, { "identifier": "run_complex_phase", "path": "femto/fe/septop/_runner.py", "snippet": "def run_complex_phase(\n config: \"femto.fe.septop.SepTopConfig\",\n ligand_1_coords: pathlib.Path,\n ligand_1_params: pathlib.Path,\n ligand_2_coords: pathlib.Path | None,\n ligand_2_params: pathlib.Path | None,\n receptor_coords: pathlib.Path,\n receptor_params: pathlib.Path | None,\n output_dir: pathlib.Path,\n report_dir: pathlib.Path | None = None,\n ligand_1_ref_atoms: tuple[str, str, str] | None = None,\n ligand_2_ref_atoms: tuple[str, str, str] | None = None,\n receptor_ref_atoms: tuple[str, str, str] | None = None,\n):\n \"\"\"Run the complex phase of the SepTop calculation.\n\n Args:\n config: The configuration.\n ligand_1_coords: The coordinates of the first ligand.\n ligand_1_params: The parameters of the first ligand.\n ligand_2_coords: The coordinates of the second ligand.\n ligand_2_params: The parameters of the second ligand.\n receptor_coords: The coordinates of the receptor.\n receptor_params: The parameters of the receptor.\n output_dir: The directory to store all outputs in.\n report_dir: The directory to store the logs / reports in.\n ligand_1_ref_atoms: The AMBER style query masks that select the first ligands\n reference atoms.\n ligand_2_ref_atoms: The AMBER style query masks that select the second ligands\n reference atoms.\n receptor_ref_atoms: The AMBER style query mask that selects the receptor atoms\n used to align the ligand.\n \"\"\"\n\n prepare_fn = functools.partial(\n _prepare_complex_phase,\n config.complex,\n ligand_1_coords,\n ligand_1_params,\n ligand_2_coords,\n ligand_2_params,\n receptor_coords,\n receptor_params,\n 
ligand_1_ref_atoms,\n ligand_2_ref_atoms,\n receptor_ref_atoms,\n )\n _run_phase(config.complex, prepare_fn, output_dir, report_dir)" }, { "identifier": "run_solution_phase", "path": "femto/fe/septop/_runner.py", "snippet": "def run_solution_phase(\n config: \"femto.fe.septop.SepTopConfig\",\n ligand_1_coords: pathlib.Path,\n ligand_1_params: pathlib.Path,\n ligand_2_coords: pathlib.Path | None,\n ligand_2_params: pathlib.Path | None,\n output_dir: pathlib.Path,\n report_dir: pathlib.Path | None = None,\n ligand_1_ref_atoms: tuple[str, str, str] | None = None,\n ligand_2_ref_atoms: tuple[str, str, str] | None = None,\n):\n \"\"\"Run the solution phase of the SepTop calculation.\n\n Args:\n config: The configuration.\n ligand_1_coords: The coordinates of the first ligand.\n ligand_1_params: The parameters of the first ligand.\n ligand_2_coords: The coordinates of the second ligand.\n ligand_2_params: The parameters of the second ligand.\n output_dir: The directory to store all outputs in.\n report_dir: The directory to store the report in.\n ligand_1_ref_atoms: The AMBER style query masks that select the first ligands\n reference atoms.\n ligand_2_ref_atoms: The AMBER style query masks that select the second ligands\n reference atoms.\n \"\"\"\n\n prepare_fn = functools.partial(\n _prepare_solution_phase,\n config.solution,\n ligand_1_coords,\n ligand_1_params,\n ligand_2_coords,\n ligand_2_params,\n ligand_1_ref_atoms,\n ligand_2_ref_atoms,\n )\n _run_phase(config.solution, prepare_fn, output_dir, report_dir)" }, { "identifier": "submit_network", "path": "femto/fe/septop/_runner.py", "snippet": "def submit_network(\n config: \"femto.fe.septop.SepTopConfig\",\n network: femto.fe.inputs.Network,\n output_dir: pathlib.Path,\n queue_options: femto.fe.utils.queue.SLURMOptions,\n mpi_command: list[str] | None = None,\n) -> list[tuple[str, str, str]]:\n \"\"\"Submits a set of SepTop calculations to an HPC queueing manager.\n\n Args:\n config: The configuration.\n network: The network of edges to run.\n output_dir: The directory to store any outputs in.\n queue_options: The options to use when submitting the jobs.\n mpi_command: The mpi runner command to use. 
The default is\n ``\"srun --mpi=pmix\"``.\n\n Returns:\n The ids of the submitted jobs.\n \"\"\"\n\n mpi_command = mpi_command if mpi_command is not None else [\"srun\", \"--mpi=pmix\"]\n\n output_dir.mkdir(exist_ok=True, parents=True)\n\n date_str = datetime.datetime.now().strftime(\"%Y-%m-%d--%H-%M-%S\")\n config_path = output_dir / f\"config-{date_str}.yaml\"\n config_path.write_text(config.model_dump_yaml(sort_keys=False))\n\n femto_command = [\"femto\", \"septop\", \"--config\", config_path]\n\n slurm_job_ids = []\n\n for edge in network.edges:\n edge_dir = output_dir / f\"{edge.ligand_1.name}~{edge.ligand_2.name}\"\n\n complex_output_dir = edge_dir / \"complex\"\n solution_output_dir = edge_dir / \"solution\"\n\n ligand_args = [\n *_create_run_flags(edge.ligand_1, \"ligand-1\"),\n *_create_run_flags(edge.ligand_2, \"ligand-2\"),\n ]\n\n run_solution_id = femto.fe.utils.queue.submit_slurm_job(\n [\n *mpi_command,\n *femto_command,\n \"run-solution\",\n *ligand_args,\n f\"--output-dir={solution_output_dir}\",\n f\"--report-dir={solution_output_dir}\",\n ],\n queue_options,\n edge_dir / f\"run-solution-{date_str}.out\",\n )\n run_complex_id = femto.fe.utils.queue.submit_slurm_job(\n [\n *mpi_command,\n *femto_command,\n \"run-complex\",\n *_create_run_flags(network.receptor, \"receptor\"),\n *ligand_args,\n f\"--output-dir={complex_output_dir}\",\n f\"--report-dir={complex_output_dir}\",\n ],\n queue_options,\n edge_dir / f\"run-complex-{date_str}.out\",\n )\n\n analyze_id = femto.fe.utils.queue.submit_slurm_job(\n [\n *femto_command,\n \"analyze\",\n \"--complex-samples\",\n complex_output_dir / \"_sample/samples.arrow\",\n \"--complex-system\",\n complex_output_dir / \"_setup/system.xml\",\n \"--solution-samples\",\n solution_output_dir / \"_sample/samples.arrow\",\n \"--solution-system\",\n solution_output_dir / \"_setup/system.xml\",\n \"--output\",\n edge_dir / \"ddg.csv\",\n ],\n queue_options,\n edge_dir / f\"analyze-{date_str}.out\",\n [run_solution_id, run_complex_id],\n )\n\n slurm_job_ids.append((run_solution_id, run_complex_id, analyze_id))\n\n return slurm_job_ids" }, { "identifier": "CDK2_SYSTEM", "path": "femto/fe/tests/systems.py", "snippet": "CDK2_SYSTEM = TestSystem(\n directory=CDK2_DATA_DIR,\n receptor_name=\"cdk2\",\n receptor_coords=CDK2_DATA_DIR / \"cdk2.pdb\",\n receptor_params=None,\n receptor_cavity_mask=\":12,14,16,22,84,87,88,134,146,147 & @CA\",\n receptor_ref_atoms=(\"@1\", \"@2\", \"@3\"),\n ligand_1_name=\"1h1q\",\n ligand_1_coords=CDK2_DATA_DIR / \"1h1q.rst7\",\n ligand_1_params=CDK2_DATA_DIR / \"1h1q.parm7\",\n ligand_1_ref_atoms=(\"@14\", \"@21\", \"@18\"),\n ligand_2_name=\"1oiu\",\n ligand_2_coords=CDK2_DATA_DIR / \"1oiu.rst7\",\n ligand_2_params=CDK2_DATA_DIR / \"1oiu.parm7\",\n ligand_2_ref_atoms=(\"@16\", \"@23\", \"@20\"),\n)" }, { "identifier": "build_mock_structure", "path": "femto/md/tests/mocking.py", "snippet": "def build_mock_structure(smiles: list[str]) -> parmed.Structure:\n \"\"\"Build a mock structure from a list of SMILES patterns\n\n Notes:\n * A conformer is generated for each molecule.\n\n Args:\n smiles: A list of SMILES patterns.\n\n Returns:\n The mock structure.\n \"\"\"\n molecules = [Chem.MolFromSmiles(pattern) for pattern in smiles]\n\n for molecule, pattern in zip(molecules, smiles, strict=True):\n assert molecule is not None, f\"{pattern} is not a valid SMILES pattern\"\n\n complex = Chem.Mol()\n\n for i, molecule in enumerate(molecules):\n molecule = Chem.AddHs(molecule)\n AllChem.EmbedMolecule(molecule)\n\n is_water = 
Chem.MolToSmiles(Chem.RemoveHs(molecule)) == \"O\"\n\n residue_name = (\n \"WAT\"\n if is_water\n else (\n f\"{molecule.GetAtomWithIdx(0).GetSymbol()}\"\n if molecule.GetNumAtoms() == 1\n else \"UNK\"\n )\n )\n symbol_count = collections.defaultdict(int)\n\n for atom in molecule.GetAtoms():\n atom_name = f\"{atom.GetSymbol()}{symbol_count[atom.GetSymbol()] + 1}\"\n atom_info = Chem.AtomPDBResidueInfo(\n atom_name.ljust(4, \" \"), atom.GetIdx(), \"\", residue_name, i\n )\n atom.SetMonomerInfo(atom_info)\n\n symbol_count[atom.GetSymbol()] += 1\n\n complex = Chem.CombineMols(complex, molecule)\n\n with tempfile.NamedTemporaryFile(suffix=\".pdb\") as tmp_file:\n Chem.MolToPDBFile(complex, tmp_file.name)\n structure = parmed.load_file(tmp_file.name, structure=True)\n\n return structure" } ]
import openmm import parmed import femto.fe.inputs import femto.fe.septop import femto.fe.utils.queue from femto.fe.septop._runner import ( _prepare_complex_phase, _prepare_solution_phase, run_complex_phase, run_solution_phase, submit_network, ) from femto.fe.tests.systems import CDK2_SYSTEM from femto.md.tests.mocking import build_mock_structure
3,831
def test_prepare_solution_phase(mock_bfe_directory, mocker): mock_setup = mocker.patch( "femto.fe.septop._setup.setup_solution", autospec=True, return_value=(parmed.Structure(), openmm.System()), ) ligand_1_coords = mock_bfe_directory / "forcefield/1h1q/vacuum.mol2" ligand_1_params = mock_bfe_directory / "forcefield/1h1q/vacuum.parm7" ligand_2_coords = mock_bfe_directory / "forcefield/1oiu/vacuum.mol2" ligand_2_params = mock_bfe_directory / "forcefield/1oiu/vacuum.parm7" ligand_1_ref_atoms = ("@1", "@2", "@3") ligand_2_ref_atoms = ("@4", "@5", "@6") config = femto.fe.septop.SepTopConfig().solution topology, system = _prepare_solution_phase( config, ligand_1_coords, ligand_1_params, ligand_2_coords, ligand_2_params, ligand_1_ref_atoms, ligand_2_ref_atoms, ) assert isinstance(system, openmm.System) assert isinstance(topology, parmed.Structure) mock_setup.assert_called_once_with( config.setup, mocker.ANY, mocker.ANY, ligand_1_ref_atoms, ligand_2_ref_atoms ) def test_prepare_complex_phase(mock_bfe_directory, mocker): mock_setup = mocker.patch( "femto.fe.septop.setup_complex", autospec=True, return_value=(parmed.Structure(), openmm.System()), ) mock_parameterize = mocker.patch( "femto.md.utils.amber.parameterize_structure", autospec=True ) receptor_coords = mock_bfe_directory / "proteins/cdk2/protein.pdb" receptor_params = None ligand_1_coords = mock_bfe_directory / "forcefield/1h1q/vacuum.mol2" ligand_1_params = mock_bfe_directory / "forcefield/1h1q/vacuum.parm7" ligand_2_coords = mock_bfe_directory / "forcefield/1oiu/vacuum.mol2" ligand_2_params = mock_bfe_directory / "forcefield/1oiu/vacuum.parm7" ligand_1_ref_atoms = ("@1", "@2", "@3") ligand_2_ref_atoms = ("@4", "@5", "@6") receptor_ref_atoms = ("@7", "@8", "@9") config = femto.fe.septop.SepTopConfig().complex
def test_prepare_solution_phase(mock_bfe_directory, mocker): mock_setup = mocker.patch( "femto.fe.septop._setup.setup_solution", autospec=True, return_value=(parmed.Structure(), openmm.System()), ) ligand_1_coords = mock_bfe_directory / "forcefield/1h1q/vacuum.mol2" ligand_1_params = mock_bfe_directory / "forcefield/1h1q/vacuum.parm7" ligand_2_coords = mock_bfe_directory / "forcefield/1oiu/vacuum.mol2" ligand_2_params = mock_bfe_directory / "forcefield/1oiu/vacuum.parm7" ligand_1_ref_atoms = ("@1", "@2", "@3") ligand_2_ref_atoms = ("@4", "@5", "@6") config = femto.fe.septop.SepTopConfig().solution topology, system = _prepare_solution_phase( config, ligand_1_coords, ligand_1_params, ligand_2_coords, ligand_2_params, ligand_1_ref_atoms, ligand_2_ref_atoms, ) assert isinstance(system, openmm.System) assert isinstance(topology, parmed.Structure) mock_setup.assert_called_once_with( config.setup, mocker.ANY, mocker.ANY, ligand_1_ref_atoms, ligand_2_ref_atoms ) def test_prepare_complex_phase(mock_bfe_directory, mocker): mock_setup = mocker.patch( "femto.fe.septop.setup_complex", autospec=True, return_value=(parmed.Structure(), openmm.System()), ) mock_parameterize = mocker.patch( "femto.md.utils.amber.parameterize_structure", autospec=True ) receptor_coords = mock_bfe_directory / "proteins/cdk2/protein.pdb" receptor_params = None ligand_1_coords = mock_bfe_directory / "forcefield/1h1q/vacuum.mol2" ligand_1_params = mock_bfe_directory / "forcefield/1h1q/vacuum.parm7" ligand_2_coords = mock_bfe_directory / "forcefield/1oiu/vacuum.mol2" ligand_2_params = mock_bfe_directory / "forcefield/1oiu/vacuum.parm7" ligand_1_ref_atoms = ("@1", "@2", "@3") ligand_2_ref_atoms = ("@4", "@5", "@6") receptor_ref_atoms = ("@7", "@8", "@9") config = femto.fe.septop.SepTopConfig().complex
topology, system = _prepare_complex_phase(
0
2023-12-07 15:28:18+00:00
8k
AIFSH/NativeDancer
nativedancer/processors/frame/modules/face_enhancer.py
[ { "identifier": "helperdoc", "path": "nativedancer/helperdoc.py", "snippet": "DOC =\\\n{\n\t'python_not_supported': 'Python version is not supported, upgrade to {version} or higher',\n\t'ffmpeg_not_installed': 'FFMpeg is not installed',\n\t'install_dependency_help': 'select the variant of {dependency} to install',\n\t'source_help': 'select a source image',\n\t'target_help': 'select a target image or video',\n\t'output_help': 'specify the output file or directory',\n\t'frame_processors_help': 'choose from the available frame processors (choices: {choices}, ...)',\n\t'frame_processor_model_help': 'choose the model for the frame processor',\n\t'frame_processor_blend_help': 'specify the blend factor for the frame processor',\n\t'ui_layouts_help': 'choose from the available ui layouts (choices: {choices}, ...)',\n\t'keep_fps_help': 'preserve the frames per second (fps) of the target',\n\t'keep_temp_help': 'retain temporary frames after processing',\n\t'skip_audio_help': 'omit audio from the target',\n\t'trim_frame_start_help': 'specify the start frame for extraction',\n\t'trim_frame_end_help': 'specify the end frame for extraction',\n\t'temp_frame_format_help': 'specify the image format used for frame extraction',\n\t'temp_frame_quality_help': 'specify the image quality used for frame extraction',\n\t'output_image_quality_help': 'specify the quality used for the output image',\n\t'output_video_encoder_help': 'specify the encoder used for the output video',\n\t'output_video_quality_help': 'specify the quality used for the output video',\n\t'max_memory_help': 'specify the maximum amount of ram to be used (in gb)',\n\t'execution_providers_help': 'choose from the available execution providers',\n\t'execution_thread_count_help': 'specify the number of execution threads',\n\t'execution_queue_count_help': 'specify the number of execution queries',\n\t'skip_download_help': 'omit automate downloads and lookups',\n\t'headless_help': 'run the program in headless mode',\n\t'creating_temp': 'Creating temporary resources',\n\t'extracting_frames_fps': 'Extracting frames with {fps} FPS',\n\t'analysing': 'Analysing',\n\t'processing': 'Processing',\n\t'downloading': 'Downloading',\n\t'temp_frames_not_found': 'Temporary frames not found',\n\t'compressing_image': 'Compressing image',\n\t'compressing_image_failed': 'Compressing image failed',\n\t'merging_video_fps': 'Merging video with {fps} FPS',\n\t'merging_video_failed': 'Merging video failed',\n\t'skipping_audio': 'Skipping audio',\n\t'restoring_audio': 'Restoring audio',\n\t'restoring_audio_failed': 'Restoring audio failed',\n\t'clearing_temp': 'Clearing temporary resources',\n\t'processing_image_succeed': 'Processing to image succeed',\n\t'processing_image_failed': 'Processing to image failed',\n\t'processing_video_succeed': 'Processing to video succeed',\n\t'processing_video_failed': 'Processing to video failed',\n\t'model_download_not_done': 'Download of the model is not done',\n\t'model_file_not_present': 'File of the model is not present',\n\t'select_image_source': 'Select an image for source path',\n\t'select_image_or_video_target': 'Select an image or video for target path',\n\t'select_file_or_directory_output': 'Select an file or directory for output path',\n\t'frame_processor_not_loaded': 'Frame processor {frame_processor} could not be loaded',\n\t'frame_processor_not_implemented': 'Frame processor {frame_processor} not implemented correctly',\n\t'ui_layout_not_loaded': 'UI layout {ui_layout} could not be loaded',\n\t'ui_layout_not_implemented': 'UI 
layout {ui_layout} not implemented correctly',\n\t'donate_button_label': 'DONATE',\n\t'start_button_label': 'START ENHANCER',\n\t'stop_button_label': 'STOP',\n\t'clear_button_label': 'CLEAR',\n\t'benchmark_runs_checkbox_group_label': 'BENCHMARK RUNS',\n\t'benchmark_results_dataframe_label': 'BENCHMARK RESULTS',\n\t'benchmark_cycles_slider_label': 'BENCHMARK CYCLES',\n\t'execution_providers_checkbox_group_label': 'EXECUTION PROVIDERS',\n\t'execution_thread_count_slider_label': 'EXECUTION THREAD COUNT',\n\t'execution_queue_count_slider_label': 'EXECUTION QUEUE COUNT',\n\t'max_memory_slider_label': 'MAX MEMORY',\n\t'output_image_or_video_label': 'OUTPUT',\n\t'output_path_textbox_label': 'OUTPUT PATH',\n\t'output_image_quality_slider_label': 'OUTPUT IMAGE QUALITY',\n\t'output_video_encoder_dropdown_label': 'OUTPUT VIDEO ENCODER',\n\t'output_video_quality_slider_label': 'OUTPUT VIDEO QUALITY',\n\t'preview_image_label': 'PREVIEW',\n\t'preview_frame_slider_label': 'PREVIEW FRAME',\n\t'frame_processors_checkbox_group_label': 'FRAME PROCESSORS',\n\t'frame_enhancer_model_dropdown_label': 'FRAME ENHANCER MODEL',\n\t'frame_enhancer_blend_slider_label': 'FRAME ENHANCER BLEND',\n\t'common_options_checkbox_group_label': 'OPTIONS',\n\t'temp_frame_format_dropdown_label': 'TEMP FRAME FORMAT',\n\t'temp_frame_quality_slider_label': 'TEMP FRAME QUALITY',\n\t'trim_frame_start_slider_label': 'TRIM FRAME START',\n\t'trim_frame_end_slider_label': 'TRIM FRAME END',\n\t'source_file_label': 'SOURCE',\n\t'target_file_label': 'TARGET',\n\t'webcam_image_label': 'WEBCAM',\n\t'webcam_mode_radio_label': 'WEBCAM MODE',\n\t'webcam_resolution_dropdown': 'WEBCAM RESOLUTION',\n\t'webcam_fps_slider': 'WEBCAM FPS',\n\t'point': '.',\n\t'comma': ',',\n\t'colon': ':',\n\t'question_mark': '?',\n\t'exclamation_mark': '!',\n\t'random_seed_help' : 'random seed for magicanimate generate',\n\t'guidance_scale_help' : 'guidance scale for magicanimate generate',\n\t'step_help' : 'step per frame for magicanimate generate',\n\t'face_debugger_items_help': 'specify the face debugger items',\n\t'face_analyser_order_help': 'specify the order used for the face analyser',\n\t'face_analyser_age_help': 'specify the age used for the face analyser',\n\t'face_analyser_gender_help': 'specify the gender used for the face analyser',\n\t'face_detector_model_help': 'specify the model used for the face detector',\n\t'face_detector_size_help': 'specify the size threshold used for the face detector',\n\t'face_detector_score_help': 'specify the score threshold used for the face detector',\n\t'face_selector_mode_help': 'specify the mode for the face selector',\n\t'reference_face_position_help': 'specify the position of the reference face',\n\t'reference_face_distance_help': 'specify the distance between the reference face and the target face',\n\t'reference_frame_number_help': 'specify the number of the reference frame',\n\t'face_mask_blur_help': 'specify the blur amount for face mask',\n\t'face_mask_padding_help': 'specify the face mask padding (top, right, bottom, left) in percent',\n\t'no_source_face_detected': 'No source face detected',\n\t'face_analyser_order_dropdown_label': 'FACE ANALYSER ORDER',\n\t'face_analyser_age_dropdown_label': 'FACE ANALYSER AGE',\n\t'face_analyser_gender_dropdown_label': 'FACE ANALYSER GENDER',\n\t'face_detector_model_dropdown_label': 'FACE DETECTOR MODEL',\n\t'face_detector_size_dropdown_label': 'FACE DETECTOR SIZE',\n\t'face_detector_score_slider_label': 'FACE DETECTOR SCORE',\n\t'face_selector_mode_dropdown_label': 'FACE 
SELECTOR MODE',\n\t'reference_face_gallery_label': 'REFERENCE FACE',\n\t'reference_face_distance_slider_label': 'REFERENCE FACE DISTANCE',\n\t'face_mask_blur_slider_label': 'FACE MASK BLUR',\n\t'face_mask_padding_top_slider_label': 'FACE MASK PADDING TOP',\n\t'face_mask_padding_bottom_slider_label': 'FACE MASK PADDING BOTTOM',\n\t'face_mask_padding_left_slider_label': 'FACE MASK PADDING LEFT',\n\t'face_mask_padding_right_slider_label': 'FACE MASK PADDING RIGHT',\n\t'face_swapper_model_dropdown_label': 'FACE SWAPPER MODEL',\n\t'face_enhancer_model_dropdown_label': 'FACE ENHANCER MODEL',\n\t'face_enhancer_blend_slider_label': 'FACE ENHANCER BLEND',\n\t'face_debugger_items_checkbox_group_label': 'FACE DEBUGGER ITEMS',\n\t'animate_seed_label' : 'ANIMATE SEED',\n\t'animate_step_label' : 'ANIMATE STEP',\n\t'animate_scale_label': 'ANIMATE SCALE',\n\t'animate_start_label': 'GENERATE ANIMATE',\n\t'animate_video_label': 'ANIMATE VIDEO'\n\n}\ndef get(key : str) -> str:" }, { "identifier": "get_many_faces", "path": "nativedancer/face_analyser.py", "snippet": "def get_many_faces(frame : Frame) -> List[Face]:\n\ttry:\n\t\tfaces_cache = get_faces_cache(frame)\n\t\tif faces_cache:\n\t\t\tfaces = faces_cache\n\t\telse:\n\t\t\tfaces = extract_faces(frame)\n\t\t\tset_faces_cache(frame, faces)\n\t\tif nativedancer.globals.face_analyser_order:\n\t\t\tfaces = sort_by_order(faces, nativedancer.globals.face_analyser_order)\n\t\tif nativedancer.globals.face_analyser_age:\n\t\t\tfaces = filter_by_age(faces, nativedancer.globals.face_analyser_age)\n\t\tif nativedancer.globals.face_analyser_gender:\n\t\t\tfaces = filter_by_gender(faces, nativedancer.globals.face_analyser_gender)\n\t\treturn faces\n\texcept (AttributeError, ValueError):\n\t\treturn []" }, { "identifier": "clear_face_analyser", "path": "nativedancer/face_analyser.py", "snippet": "def clear_face_analyser() -> Any:\n\tglobal FACE_ANALYSER\n\n\tFACE_ANALYSER = None" }, { "identifier": "warp_face", "path": "nativedancer/face_helper.py", "snippet": "def warp_face(temp_frame : Frame, kps : Kps, template : Template, size : Size) -> Tuple[Frame, Matrix]:\n\tnormed_template = TEMPLATES.get(template) * size[1] / size[0]\n\taffine_matrix = cv2.estimateAffinePartial2D(kps, normed_template, method = cv2.LMEDS)[0]\n\tcrop_frame = cv2.warpAffine(temp_frame, affine_matrix, (size[1], size[1]), borderMode = cv2.BORDER_REPLICATE)\n\treturn crop_frame, affine_matrix" }, { "identifier": "paste_back", "path": "nativedancer/face_helper.py", "snippet": "def paste_back(temp_frame : Frame, crop_frame: Frame, affine_matrix : Matrix, face_mask_blur : float, face_mask_padding : Padding) -> Frame:\n\tinverse_matrix = cv2.invertAffineTransform(affine_matrix)\n\ttemp_frame_size = temp_frame.shape[:2][::-1]\n\tmask_size = tuple(crop_frame.shape[:2])\n\tmask_frame = create_static_mask_frame(mask_size, face_mask_blur, face_mask_padding)\n\tinverse_mask_frame = cv2.warpAffine(mask_frame, inverse_matrix, temp_frame_size).clip(0, 1)\n\tinverse_crop_frame = cv2.warpAffine(crop_frame, inverse_matrix, temp_frame_size, borderMode = cv2.BORDER_REPLICATE)\n\tpaste_frame = temp_frame.copy()\n\tpaste_frame[:, :, 0] = inverse_mask_frame * inverse_crop_frame[:, :, 0] + (1 - inverse_mask_frame) * temp_frame[:, :, 0]\n\tpaste_frame[:, :, 1] = inverse_mask_frame * inverse_crop_frame[:, :, 1] + (1 - inverse_mask_frame) * temp_frame[:, :, 1]\n\tpaste_frame[:, :, 2] = inverse_mask_frame * inverse_crop_frame[:, :, 2] + (1 - inverse_mask_frame) * temp_frame[:, :, 2]\n\treturn paste_frame" }, { "identifier": 
"clear_content_analyser", "path": "nativedancer/content_analyser.py", "snippet": "def clear_content_analyser() -> None:\n\tglobal CONTENT_ANALYSER\n\n\tCONTENT_ANALYSER = None" }, { "identifier": "Face", "path": "nativedancer/typing.py", "snippet": "" }, { "identifier": "conditional_download", "path": "nativedancer/utils.py", "snippet": "def conditional_download(download_directory_path : str, urls : List[str]) -> None:\n\twith ThreadPoolExecutor() as executor:\n\t\tfor url in urls:\n\t\t\texecutor.submit(get_download_size, url)\n\tfor url in urls:\n\t\tdownload_file_path = os.path.join(download_directory_path, os.path.basename(url))\n\t\ttotal = get_download_size(url)\n\t\tif is_file(download_file_path):\n\t\t\tinitial = os.path.getsize(download_file_path)\n\t\telse:\n\t\t\tinitial = 0\n\t\tif initial < total:\n\t\t\twith tqdm(total = total, initial = initial, desc = helperdoc.get('downloading'), unit = 'B', unit_scale = True, unit_divisor = 1024, ascii = ' =') as progress:\n\t\t\t\tsubprocess.Popen([ 'curl', '--create-dirs', '--silent', '--insecure', '--location', '--continue-at', '-', '--output', download_file_path, url ])\n\t\t\t\tcurrent = initial\n\t\t\t\twhile current < total:\n\t\t\t\t\tif is_file(download_file_path):\n\t\t\t\t\t\tcurrent = os.path.getsize(download_file_path)\n\t\t\t\t\t\tprogress.update(current - progress.n)" }, { "identifier": "resolve_relative_path", "path": "nativedancer/utils.py", "snippet": "def resolve_relative_path(path : str) -> str:\n\treturn os.path.abspath(os.path.join(os.path.dirname(__file__), path))" }, { "identifier": "is_image", "path": "nativedancer/utils.py", "snippet": "def is_image(image_path : str) -> bool:\n\tif is_file(image_path):\n\t\tmimetype = filetype.guess(image_path).mime\n\t\treturn bool(mimetype and mimetype.startswith('image/'))\n\treturn False" }, { "identifier": "is_video", "path": "nativedancer/utils.py", "snippet": "def is_video(video_path : str) -> bool:\n\tif is_file(video_path):\n\t\tmimetype = filetype.guess(video_path).mime\n\t\treturn bool(mimetype and mimetype.startswith('video/'))\n\treturn False" }, { "identifier": "is_file", "path": "nativedancer/utils.py", "snippet": "def is_file(file_path : str) -> bool:\n\treturn bool(file_path and os.path.isfile(file_path))" }, { "identifier": "is_download_done", "path": "nativedancer/utils.py", "snippet": "def is_download_done(url : str, file_path : str) -> bool:\n\tif is_file(file_path):\n\t\treturn get_download_size(url) == os.path.getsize(file_path)\n\treturn False" }, { "identifier": "create_metavar", "path": "nativedancer/utils.py", "snippet": "def create_metavar(ranges : List[Any]) -> str:\n\treturn '[' + str(ranges[0]) + '-' + str(ranges[-1]) + ']'" }, { "identifier": "update_status", "path": "nativedancer/utils.py", "snippet": "def update_status(message : str, scope : str = 'NATIVEDANCER.CORE') -> None:\n\tprint('[' + scope + '] ' + message)" }, { "identifier": "read_image", "path": "nativedancer/vision.py", "snippet": "def read_image(image_path : str) -> Optional[Frame]:\n\tif image_path:\n\t\treturn cv2.imread(image_path)\n\treturn None" }, { "identifier": "read_static_image", "path": "nativedancer/vision.py", "snippet": "@lru_cache(maxsize = 128)\ndef read_static_image(image_path : str) -> Optional[Frame]:\n\treturn read_image(image_path)" }, { "identifier": "write_image", "path": "nativedancer/vision.py", "snippet": "def write_image(image_path : str, frame : Frame) -> bool:\n\tif image_path:\n\t\treturn cv2.imwrite(image_path, frame)\n\treturn False" }, { "identifier": 
"globals", "path": "nativedancer/processors/frame/globals.py", "snippet": "" }, { "identifier": "choices", "path": "nativedancer/processors/frame/choices.py", "snippet": "" } ]
from typing import Any, List, Dict, Literal, Optional
from argparse import ArgumentParser
from nativedancer import helperdoc
from nativedancer.face_analyser import get_many_faces, clear_face_analyser
from nativedancer.face_helper import warp_face, paste_back
from nativedancer.content_analyser import clear_content_analyser
from nativedancer.typing import Face, Frame, Update_Process, ProcessMode, ModelValue, OptionsWithModel
from nativedancer.utils import conditional_download, resolve_relative_path, is_image, is_video, is_file, is_download_done, create_metavar, update_status
from nativedancer.vision import read_image, read_static_image, write_image
from nativedancer.processors.frame import globals as frame_processors_globals
from nativedancer.processors.frame import choices as frame_processors_choices
import cv2
import threading
import numpy
import onnxruntime
import nativedancer.globals
import nativedancer.processors.frame.core as frame_processors
4,828
FRAME_PROCESSOR = None THREAD_SEMAPHORE : threading.Semaphore = threading.Semaphore() THREAD_LOCK : threading.Lock = threading.Lock() NAME = 'NATIVEDANCER.FRAME_PROCESSOR.FACE_ENHANCER' MODELS : Dict[str, ModelValue] =\ { 'codeformer': { 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/codeformer.onnx', 'path': resolve_relative_path('../weights/face_enhancer/codeformer.onnx'), 'template': 'ffhq', 'size': (512, 512) }, 'gfpgan_1.2': { 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gfpgan_1.2.onnx', 'path': resolve_relative_path('../weights/face_enhancer/gfpgan_1.2.onnx'), 'template': 'ffhq', 'size': (512, 512) }, 'gfpgan_1.3': { 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gfpgan_1.3.onnx', 'path': resolve_relative_path('../weights/face_enhancer/gfpgan_1.3.onnx'), 'template': 'ffhq', 'size': (512, 512) }, 'gfpgan_1.4': { 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gfpgan_1.4.onnx', 'path': resolve_relative_path('../weights/face_enhancer/gfpgan_1.4.onnx'), 'template': 'ffhq', 'size': (512, 512) }, 'gpen_bfr_256': { 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gpen_bfr_256.onnx', 'path': resolve_relative_path('../weights/face_enhancer/gpen_bfr_256.onnx'), 'template': 'arcface_v2', 'size': (128, 256) }, 'gpen_bfr_512': { 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gpen_bfr_512.onnx', 'path': resolve_relative_path('../weights/face_enhancer/gpen_bfr_512.onnx'), 'template': 'ffhq', 'size': (512, 512) }, 'restoreformer': { 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/restoreformer.onnx', 'path': resolve_relative_path('../weights/face_enhancer/restoreformer.onnx'), 'template': 'ffhq', 'size': (512, 512) } } OPTIONS : Optional[OptionsWithModel] = None def get_frame_processor() -> Any: global FRAME_PROCESSOR with THREAD_LOCK: if FRAME_PROCESSOR is None: model_path = get_options('model').get('path') FRAME_PROCESSOR = onnxruntime.InferenceSession(model_path, providers = nativedancer.globals.execution_providers) return FRAME_PROCESSOR def clear_frame_processor() -> None: global FRAME_PROCESSOR FRAME_PROCESSOR = None def get_options(key : Literal['model']) -> Any: global OPTIONS if OPTIONS is None: OPTIONS =\ { 'model': MODELS[frame_processors_globals.face_enhancer_model] } return OPTIONS.get(key) def set_options(key : Literal['model'], value : Any) -> None: global OPTIONS OPTIONS[key] = value def register_args(program : ArgumentParser) -> None: program.add_argument('--face-enhancer-model', help = helperdoc.get('frame_processor_model_help'), dest = 'face_enhancer_model', default = 'gfpgan_1.4', choices = frame_processors_choices.face_enhancer_models)
FRAME_PROCESSOR = None THREAD_SEMAPHORE : threading.Semaphore = threading.Semaphore() THREAD_LOCK : threading.Lock = threading.Lock() NAME = 'NATIVEDANCER.FRAME_PROCESSOR.FACE_ENHANCER' MODELS : Dict[str, ModelValue] =\ { 'codeformer': { 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/codeformer.onnx', 'path': resolve_relative_path('../weights/face_enhancer/codeformer.onnx'), 'template': 'ffhq', 'size': (512, 512) }, 'gfpgan_1.2': { 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gfpgan_1.2.onnx', 'path': resolve_relative_path('../weights/face_enhancer/gfpgan_1.2.onnx'), 'template': 'ffhq', 'size': (512, 512) }, 'gfpgan_1.3': { 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gfpgan_1.3.onnx', 'path': resolve_relative_path('../weights/face_enhancer/gfpgan_1.3.onnx'), 'template': 'ffhq', 'size': (512, 512) }, 'gfpgan_1.4': { 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gfpgan_1.4.onnx', 'path': resolve_relative_path('../weights/face_enhancer/gfpgan_1.4.onnx'), 'template': 'ffhq', 'size': (512, 512) }, 'gpen_bfr_256': { 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gpen_bfr_256.onnx', 'path': resolve_relative_path('../weights/face_enhancer/gpen_bfr_256.onnx'), 'template': 'arcface_v2', 'size': (128, 256) }, 'gpen_bfr_512': { 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/gpen_bfr_512.onnx', 'path': resolve_relative_path('../weights/face_enhancer/gpen_bfr_512.onnx'), 'template': 'ffhq', 'size': (512, 512) }, 'restoreformer': { 'url': 'https://github.com/facefusion/facefusion-assets/releases/download/models/restoreformer.onnx', 'path': resolve_relative_path('../weights/face_enhancer/restoreformer.onnx'), 'template': 'ffhq', 'size': (512, 512) } } OPTIONS : Optional[OptionsWithModel] = None def get_frame_processor() -> Any: global FRAME_PROCESSOR with THREAD_LOCK: if FRAME_PROCESSOR is None: model_path = get_options('model').get('path') FRAME_PROCESSOR = onnxruntime.InferenceSession(model_path, providers = nativedancer.globals.execution_providers) return FRAME_PROCESSOR def clear_frame_processor() -> None: global FRAME_PROCESSOR FRAME_PROCESSOR = None def get_options(key : Literal['model']) -> Any: global OPTIONS if OPTIONS is None: OPTIONS =\ { 'model': MODELS[frame_processors_globals.face_enhancer_model] } return OPTIONS.get(key) def set_options(key : Literal['model'], value : Any) -> None: global OPTIONS OPTIONS[key] = value def register_args(program : ArgumentParser) -> None: program.add_argument('--face-enhancer-model', help = helperdoc.get('frame_processor_model_help'), dest = 'face_enhancer_model', default = 'gfpgan_1.4', choices = frame_processors_choices.face_enhancer_models)
program.add_argument('--face-enhancer-blend', help = helperdoc.get('frame_processor_blend_help'), dest = 'face_enhancer_blend', type = int, default = 80, choices = frame_processors_choices.face_enhancer_blend_range, metavar = create_metavar(frame_processors_choices.face_enhancer_blend_range))
13
2023-12-10 20:14:00+00:00
8k
ethanweber/nerfiller
nerfiller/inpaint/saicinpainting/training/data/datasets.py
[ { "identifier": "InpaintingDataset", "path": "nerfiller/inpaint/saicinpainting/evaluation/data.py", "snippet": "class InpaintingDataset(Dataset):\n def __init__(self, datadir, img_suffix=\".jpg\", pad_out_to_modulo=None, scale_factor=None):\n self.datadir = datadir\n self.mask_filenames = sorted(list(glob.glob(os.path.join(self.datadir, \"**\", \"*mask*.png\"), recursive=True)))\n self.img_filenames = [fname.rsplit(\"_mask\", 1)[0] + img_suffix for fname in self.mask_filenames]\n self.pad_out_to_modulo = pad_out_to_modulo\n self.scale_factor = scale_factor\n\n def __len__(self):\n return len(self.mask_filenames)\n\n def __getitem__(self, i):\n image = load_image(self.img_filenames[i], mode=\"RGB\")\n mask = load_image(self.mask_filenames[i], mode=\"L\")\n result = dict(image=image, mask=mask[None, ...])\n\n if self.scale_factor is not None:\n result[\"image\"] = scale_image(result[\"image\"], self.scale_factor)\n result[\"mask\"] = scale_image(result[\"mask\"], self.scale_factor, interpolation=cv2.INTER_NEAREST)\n\n if self.pad_out_to_modulo is not None and self.pad_out_to_modulo > 1:\n result[\"unpad_to_size\"] = result[\"image\"].shape[1:]\n result[\"image\"] = pad_img_to_modulo(result[\"image\"], self.pad_out_to_modulo)\n result[\"mask\"] = pad_img_to_modulo(result[\"mask\"], self.pad_out_to_modulo)\n\n return result" }, { "identifier": "OurInpaintingDataset", "path": "nerfiller/inpaint/saicinpainting/evaluation/data.py", "snippet": "class OurInpaintingDataset(Dataset):\n def __init__(self, datadir, img_suffix=\".jpg\", pad_out_to_modulo=None, scale_factor=None):\n self.datadir = datadir\n self.mask_filenames = sorted(\n list(\n glob.glob(\n os.path.join(self.datadir, \"mask\", \"**\", \"*mask*.png\"),\n recursive=True,\n )\n )\n )\n self.img_filenames = [\n os.path.join(\n self.datadir,\n \"img\",\n os.path.basename(fname.rsplit(\"-\", 1)[0].rsplit(\"_\", 1)[0]) + \".png\",\n )\n for fname in self.mask_filenames\n ]\n self.pad_out_to_modulo = pad_out_to_modulo\n self.scale_factor = scale_factor\n\n def __len__(self):\n return len(self.mask_filenames)\n\n def __getitem__(self, i):\n result = dict(\n image=load_image(self.img_filenames[i], mode=\"RGB\"),\n mask=load_image(self.mask_filenames[i], mode=\"L\")[None, ...],\n )\n\n if self.scale_factor is not None:\n result[\"image\"] = scale_image(result[\"image\"], self.scale_factor)\n result[\"mask\"] = scale_image(result[\"mask\"], self.scale_factor)\n\n if self.pad_out_to_modulo is not None and self.pad_out_to_modulo > 1:\n result[\"image\"] = pad_img_to_modulo(result[\"image\"], self.pad_out_to_modulo)\n result[\"mask\"] = pad_img_to_modulo(result[\"mask\"], self.pad_out_to_modulo)\n\n return result" }, { "identifier": "ceil_modulo", "path": "nerfiller/inpaint/saicinpainting/evaluation/data.py", "snippet": "def ceil_modulo(x, mod):\n if x % mod == 0:\n return x\n return (x // mod + 1) * mod" }, { "identifier": "InpaintingEvalOnlineDataset", "path": "nerfiller/inpaint/saicinpainting/evaluation/data.py", "snippet": "class InpaintingEvalOnlineDataset(Dataset):\n def __init__(\n self,\n indir,\n mask_generator,\n img_suffix=\".jpg\",\n pad_out_to_modulo=None,\n scale_factor=None,\n **kwargs,\n ):\n self.indir = indir\n self.mask_generator = mask_generator\n self.img_filenames = sorted(list(glob.glob(os.path.join(self.indir, \"**\", f\"*{img_suffix}\"), recursive=True)))\n self.pad_out_to_modulo = pad_out_to_modulo\n self.scale_factor = scale_factor\n\n def __len__(self):\n return len(self.img_filenames)\n\n def __getitem__(self, i):\n 
img, raw_image = load_image(self.img_filenames[i], mode=\"RGB\", return_orig=True)\n mask = self.mask_generator(img, raw_image=raw_image)\n result = dict(image=img, mask=mask)\n\n if self.scale_factor is not None:\n result[\"image\"] = scale_image(result[\"image\"], self.scale_factor)\n result[\"mask\"] = scale_image(result[\"mask\"], self.scale_factor, interpolation=cv2.INTER_NEAREST)\n\n if self.pad_out_to_modulo is not None and self.pad_out_to_modulo > 1:\n result[\"image\"] = pad_img_to_modulo(result[\"image\"], self.pad_out_to_modulo)\n result[\"mask\"] = pad_img_to_modulo(result[\"mask\"], self.pad_out_to_modulo)\n return result" }, { "identifier": "IAAAffine2", "path": "nerfiller/inpaint/saicinpainting/training/data/aug.py", "snippet": "class IAAAffine2(DualIAATransform):\n \"\"\"Place a regular grid of points on the input and randomly move the neighbourhood of these point around\n via affine transformations.\n\n Note: This class introduce interpolation artifacts to mask if it has values other than {0;1}\n\n Args:\n p (float): probability of applying the transform. Default: 0.5.\n\n Targets:\n image, mask\n \"\"\"\n\n def __init__(\n self,\n scale=(0.7, 1.3),\n translate_percent=None,\n translate_px=None,\n rotate=0.0,\n shear=(-0.1, 0.1),\n order=1,\n cval=0,\n mode=\"reflect\",\n always_apply=False,\n p=0.5,\n ):\n super(IAAAffine2, self).__init__(always_apply, p)\n self.scale = dict(x=scale, y=scale)\n self.translate_percent = to_tuple(translate_percent, 0)\n self.translate_px = to_tuple(translate_px, 0)\n self.rotate = to_tuple(rotate)\n self.shear = dict(x=shear, y=shear)\n self.order = order\n self.cval = cval\n self.mode = mode\n\n @property\n def processor(self):\n return iaa.Affine(\n self.scale,\n self.translate_percent,\n self.translate_px,\n self.rotate,\n self.shear,\n self.order,\n self.cval,\n self.mode,\n )\n\n def get_transform_init_args_names(self):\n return (\n \"scale\",\n \"translate_percent\",\n \"translate_px\",\n \"rotate\",\n \"shear\",\n \"order\",\n \"cval\",\n \"mode\",\n )" }, { "identifier": "IAAPerspective2", "path": "nerfiller/inpaint/saicinpainting/training/data/aug.py", "snippet": "class IAAPerspective2(DualIAATransform):\n \"\"\"Perform a random four point perspective transform of the input.\n\n Note: This class introduce interpolation artifacts to mask if it has values other than {0;1}\n\n Args:\n scale ((float, float): standard deviation of the normal distributions. These are used to sample\n the random distances of the subimage's corners from the full image's corners. Default: (0.05, 0.1).\n p (float): probability of applying the transform. 
Default: 0.5.\n\n Targets:\n image, mask\n \"\"\"\n\n def __init__(\n self,\n scale=(0.05, 0.1),\n keep_size=True,\n always_apply=False,\n p=0.5,\n order=1,\n cval=0,\n mode=\"replicate\",\n ):\n super(IAAPerspective2, self).__init__(always_apply, p)\n self.scale = to_tuple(scale, 1.0)\n self.keep_size = keep_size\n self.cval = cval\n self.mode = mode\n\n @property\n def processor(self):\n return iaa.PerspectiveTransform(self.scale, keep_size=self.keep_size, mode=self.mode, cval=self.cval)\n\n def get_transform_init_args_names(self):\n return (\"scale\", \"keep_size\")" }, { "identifier": "get_mask_generator", "path": "nerfiller/inpaint/saicinpainting/training/data/masks.py", "snippet": "def get_mask_generator(kind, kwargs):\n if kind is None:\n kind = \"mixed\"\n if kwargs is None:\n kwargs = {}\n\n if kind == \"mixed\":\n cl = MixedMaskGenerator\n elif kind == \"outpainting\":\n cl = OutpaintingMaskGenerator\n elif kind == \"dumb\":\n cl = DumbAreaMaskGenerator\n else:\n raise NotImplementedError(f\"No such generator kind = {kind}\")\n return cl(**kwargs)" } ]
import glob
import logging
import os
import random
import albumentations as A
import cv2
import numpy as np
import torch
import torch.nn.functional as F
import webdataset
from omegaconf import open_dict, OmegaConf
from torch.utils.data import (
    Dataset,
    IterableDataset,
    DataLoader,
    DistributedSampler,
    ConcatDataset,
)
from nerfiller.inpaint.saicinpainting.evaluation.data import (
    InpaintingDataset as InpaintingEvaluationDataset,
    OurInpaintingDataset as OurInpaintingEvaluationDataset,
    ceil_modulo,
    InpaintingEvalOnlineDataset,
)
from nerfiller.inpaint.saicinpainting.training.data.aug import (
    IAAAffine2,
    IAAPerspective2,
)
from nerfiller.inpaint.saicinpainting.training.data.masks import get_mask_generator
4,338
return ohe.permute(2, 0, 1).float(), tensor.unsqueeze(0) def get_transforms(transform_variant, out_size): if transform_variant == "default": transform = A.Compose( [ A.RandomScale(scale_limit=0.2), # +/- 20% A.PadIfNeeded(min_height=out_size, min_width=out_size), A.RandomCrop(height=out_size, width=out_size), A.HorizontalFlip(), A.CLAHE(), A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2), A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5), A.ToFloat(), ] ) elif transform_variant == "distortions": transform = A.Compose( [ IAAPerspective2(scale=(0.0, 0.06)), IAAAffine2(scale=(0.7, 1.3), rotate=(-40, 40), shear=(-0.1, 0.1)), A.PadIfNeeded(min_height=out_size, min_width=out_size), A.OpticalDistortion(), A.RandomCrop(height=out_size, width=out_size), A.HorizontalFlip(), A.CLAHE(), A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2), A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5), A.ToFloat(), ] ) elif transform_variant == "distortions_scale05_1": transform = A.Compose( [ IAAPerspective2(scale=(0.0, 0.06)), IAAAffine2(scale=(0.5, 1.0), rotate=(-40, 40), shear=(-0.1, 0.1), p=1), A.PadIfNeeded(min_height=out_size, min_width=out_size), A.OpticalDistortion(), A.RandomCrop(height=out_size, width=out_size), A.HorizontalFlip(), A.CLAHE(), A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2), A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5), A.ToFloat(), ] ) elif transform_variant == "distortions_scale03_12": transform = A.Compose( [ IAAPerspective2(scale=(0.0, 0.06)), IAAAffine2(scale=(0.3, 1.2), rotate=(-40, 40), shear=(-0.1, 0.1), p=1), A.PadIfNeeded(min_height=out_size, min_width=out_size), A.OpticalDistortion(), A.RandomCrop(height=out_size, width=out_size), A.HorizontalFlip(), A.CLAHE(), A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2), A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5), A.ToFloat(), ] ) elif transform_variant == "distortions_scale03_07": transform = A.Compose( [ IAAPerspective2(scale=(0.0, 0.06)), IAAAffine2(scale=(0.3, 0.7), rotate=(-40, 40), shear=(-0.1, 0.1), p=1), # scale 512 to 256 in average A.PadIfNeeded(min_height=out_size, min_width=out_size), A.OpticalDistortion(), A.RandomCrop(height=out_size, width=out_size), A.HorizontalFlip(), A.CLAHE(), A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2), A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5), A.ToFloat(), ] ) elif transform_variant == "distortions_light": transform = A.Compose( [ IAAPerspective2(scale=(0.0, 0.02)), IAAAffine2(scale=(0.8, 1.8), rotate=(-20, 20), shear=(-0.03, 0.03)), A.PadIfNeeded(min_height=out_size, min_width=out_size), A.RandomCrop(height=out_size, width=out_size), A.HorizontalFlip(), A.CLAHE(), A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2), A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5), A.ToFloat(), ] ) elif transform_variant == "non_space_transform": transform = A.Compose( [ A.CLAHE(), A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2), A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5), A.ToFloat(), ] ) elif transform_variant == "no_augs": transform = A.Compose([A.ToFloat()]) else: raise ValueError(f"Unexpected transform_variant {transform_variant}") return transform def make_default_train_dataloader( indir, kind="default", out_size=512, mask_gen_kwargs=None, 
transform_variant="default", mask_generator_kind="mixed", dataloader_kwargs=None, ddp_kwargs=None, **kwargs, ): LOGGER.info(f"Make train dataloader {kind} from {indir}. Using mask generator={mask_generator_kind}")
LOGGER = logging.getLogger(__name__) class InpaintingTrainDataset(Dataset): def __init__(self, indir, mask_generator, transform): self.in_files = list(glob.glob(os.path.join(indir, "**", "*.jpg"), recursive=True)) self.mask_generator = mask_generator self.transform = transform self.iter_i = 0 def __len__(self): return len(self.in_files) def __getitem__(self, item): path = self.in_files[item] img = cv2.imread(path) img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) img = self.transform(image=img)["image"] img = np.transpose(img, (2, 0, 1)) # TODO: maybe generate mask before augmentations? slower, but better for segmentation-based masks mask = self.mask_generator(img, iter_i=self.iter_i) self.iter_i += 1 return dict(image=img, mask=mask) class InpaintingTrainWebDataset(IterableDataset): def __init__(self, indir, mask_generator, transform, shuffle_buffer=200): self.impl = webdataset.Dataset(indir).shuffle(shuffle_buffer).decode("rgb").to_tuple("jpg") self.mask_generator = mask_generator self.transform = transform def __iter__(self): for iter_i, (img,) in enumerate(self.impl): img = np.clip(img * 255, 0, 255).astype("uint8") img = self.transform(image=img)["image"] img = np.transpose(img, (2, 0, 1)) mask = self.mask_generator(img, iter_i=iter_i) yield dict(image=img, mask=mask) class ImgSegmentationDataset(Dataset): def __init__( self, indir, mask_generator, transform, out_size, segm_indir, semantic_seg_n_classes, ): self.indir = indir self.segm_indir = segm_indir self.mask_generator = mask_generator self.transform = transform self.out_size = out_size self.semantic_seg_n_classes = semantic_seg_n_classes self.in_files = list(glob.glob(os.path.join(indir, "**", "*.jpg"), recursive=True)) def __len__(self): return len(self.in_files) def __getitem__(self, item): path = self.in_files[item] img = cv2.imread(path) img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) img = cv2.resize(img, (self.out_size, self.out_size)) img = self.transform(image=img)["image"] img = np.transpose(img, (2, 0, 1)) mask = self.mask_generator(img) segm, segm_classes = self.load_semantic_segm(path) result = dict(image=img, mask=mask, segm=segm, segm_classes=segm_classes) return result def load_semantic_segm(self, img_path): segm_path = img_path.replace(self.indir, self.segm_indir).replace(".jpg", ".png") mask = cv2.imread(segm_path, cv2.IMREAD_GRAYSCALE) mask = cv2.resize(mask, (self.out_size, self.out_size)) tensor = torch.from_numpy(np.clip(mask.astype(int) - 1, 0, None)) ohe = F.one_hot(tensor.long(), num_classes=self.semantic_seg_n_classes) # w x h x n_classes return ohe.permute(2, 0, 1).float(), tensor.unsqueeze(0) def get_transforms(transform_variant, out_size): if transform_variant == "default": transform = A.Compose( [ A.RandomScale(scale_limit=0.2), # +/- 20% A.PadIfNeeded(min_height=out_size, min_width=out_size), A.RandomCrop(height=out_size, width=out_size), A.HorizontalFlip(), A.CLAHE(), A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2), A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5), A.ToFloat(), ] ) elif transform_variant == "distortions": transform = A.Compose( [ IAAPerspective2(scale=(0.0, 0.06)), IAAAffine2(scale=(0.7, 1.3), rotate=(-40, 40), shear=(-0.1, 0.1)), A.PadIfNeeded(min_height=out_size, min_width=out_size), A.OpticalDistortion(), A.RandomCrop(height=out_size, width=out_size), A.HorizontalFlip(), A.CLAHE(), A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2), A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5), 
A.ToFloat(), ] ) elif transform_variant == "distortions_scale05_1": transform = A.Compose( [ IAAPerspective2(scale=(0.0, 0.06)), IAAAffine2(scale=(0.5, 1.0), rotate=(-40, 40), shear=(-0.1, 0.1), p=1), A.PadIfNeeded(min_height=out_size, min_width=out_size), A.OpticalDistortion(), A.RandomCrop(height=out_size, width=out_size), A.HorizontalFlip(), A.CLAHE(), A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2), A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5), A.ToFloat(), ] ) elif transform_variant == "distortions_scale03_12": transform = A.Compose( [ IAAPerspective2(scale=(0.0, 0.06)), IAAAffine2(scale=(0.3, 1.2), rotate=(-40, 40), shear=(-0.1, 0.1), p=1), A.PadIfNeeded(min_height=out_size, min_width=out_size), A.OpticalDistortion(), A.RandomCrop(height=out_size, width=out_size), A.HorizontalFlip(), A.CLAHE(), A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2), A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5), A.ToFloat(), ] ) elif transform_variant == "distortions_scale03_07": transform = A.Compose( [ IAAPerspective2(scale=(0.0, 0.06)), IAAAffine2(scale=(0.3, 0.7), rotate=(-40, 40), shear=(-0.1, 0.1), p=1), # scale 512 to 256 in average A.PadIfNeeded(min_height=out_size, min_width=out_size), A.OpticalDistortion(), A.RandomCrop(height=out_size, width=out_size), A.HorizontalFlip(), A.CLAHE(), A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2), A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5), A.ToFloat(), ] ) elif transform_variant == "distortions_light": transform = A.Compose( [ IAAPerspective2(scale=(0.0, 0.02)), IAAAffine2(scale=(0.8, 1.8), rotate=(-20, 20), shear=(-0.03, 0.03)), A.PadIfNeeded(min_height=out_size, min_width=out_size), A.RandomCrop(height=out_size, width=out_size), A.HorizontalFlip(), A.CLAHE(), A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2), A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5), A.ToFloat(), ] ) elif transform_variant == "non_space_transform": transform = A.Compose( [ A.CLAHE(), A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2), A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5), A.ToFloat(), ] ) elif transform_variant == "no_augs": transform = A.Compose([A.ToFloat()]) else: raise ValueError(f"Unexpected transform_variant {transform_variant}") return transform def make_default_train_dataloader( indir, kind="default", out_size=512, mask_gen_kwargs=None, transform_variant="default", mask_generator_kind="mixed", dataloader_kwargs=None, ddp_kwargs=None, **kwargs, ): LOGGER.info(f"Make train dataloader {kind} from {indir}. Using mask generator={mask_generator_kind}")
mask_generator = get_mask_generator(kind=mask_generator_kind, kwargs=mask_gen_kwargs)
6
2023-12-07 19:12:08+00:00
8k
nnanhuang/Customize-it-3D
ldm/models/diffusion/ddim.py
[ { "identifier": "make_ddim_sampling_parameters", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True):\n # select alphas for computing the variance schedule\n alphas = alphacums[ddim_timesteps]\n alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist())\n\n # according the the formula provided in https://arxiv.org/abs/2010.02502\n sigmas = eta * np.sqrt((1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev))\n if verbose:\n print(f'Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}')\n print(f'For the chosen value of eta, which is {eta}, '\n f'this results in the following sigma_t schedule for ddim sampler {sigmas}')\n return sigmas, alphas, alphas_prev" }, { "identifier": "make_ddim_timesteps", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def make_ddim_timesteps(ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True):\n if ddim_discr_method == 'uniform':\n c = num_ddpm_timesteps // num_ddim_timesteps\n ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c)))\n elif ddim_discr_method == 'quad':\n ddim_timesteps = ((np.linspace(0, np.sqrt(num_ddpm_timesteps * .8), num_ddim_timesteps)) ** 2).astype(int)\n else:\n raise NotImplementedError(f'There is no ddim discretization method called \"{ddim_discr_method}\"')\n\n # assert ddim_timesteps.shape[0] == num_ddim_timesteps\n # add one to get the final alpha values right (the ones from first scale to data during sampling)\n steps_out = ddim_timesteps + 1\n if verbose:\n print(f'Selected timesteps for ddim sampler: {steps_out}')\n return steps_out" }, { "identifier": "noise_like", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "extract_into_tensor", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "renorm_thresholding", "path": "ldm/models/diffusion/sampling_util.py", "snippet": "def renorm_thresholding(x0, value):\n # renorm\n pred_max = x0.max()\n pred_min = x0.min()\n pred_x0 = (x0 - pred_min) / (pred_max - pred_min) # 0 ... 1\n pred_x0 = 2 * pred_x0 - 1. # -1 ... 1\n\n s = torch.quantile(\n rearrange(pred_x0, 'b ... -> b (...)').abs(),\n value,\n dim=-1\n )\n s.clamp_(min=1.0)\n s = s.view(-1, *((1,) * (pred_x0.ndim - 1)))\n\n # clip by threshold\n # pred_x0 = pred_x0.clamp(-s, s) / s # needs newer pytorch # TODO bring back to pure-gpu with min/max\n\n # temporary hack: numpy on cpu\n pred_x0 = np.clip(pred_x0.cpu().numpy(), -s.cpu().numpy(), s.cpu().numpy()) / s.cpu().numpy()\n pred_x0 = torch.tensor(pred_x0).to(self.model.device)\n\n # re.renorm\n pred_x0 = (pred_x0 + 1.) / 2. # 0 ... 
1\n pred_x0 = (pred_max - pred_min) * pred_x0 + pred_min # orig range\n return pred_x0" }, { "identifier": "norm_thresholding", "path": "ldm/models/diffusion/sampling_util.py", "snippet": "def norm_thresholding(x0, value):\n s = append_dims(x0.pow(2).flatten(1).mean(1).sqrt().clamp(min=value), x0.ndim)\n return x0 * (value / s)" }, { "identifier": "spatial_norm_thresholding", "path": "ldm/models/diffusion/sampling_util.py", "snippet": "def spatial_norm_thresholding(x0, value):\n # b c h w\n s = x0.pow(2).mean(1, keepdim=True).sqrt().clamp(min=value)\n return x0 * (value / s)" } ]
import torch
import numpy as np
from tqdm import tqdm
from functools import partial
from einops import rearrange
from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like, extract_into_tensor
from ldm.models.diffusion.sampling_util import renorm_thresholding, norm_thresholding, spatial_norm_thresholding
4,471
def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None): b, *_, device = *x.shape, x.device if unconditional_conditioning is None or unconditional_guidance_scale == 1.: e_t = self.model.apply_model(x, t, c) else: x_in = torch.cat([x] * 2) t_in = torch.cat([t] * 2) if isinstance(c, dict): assert isinstance(unconditional_conditioning, dict) c_in = dict() for k in c: if isinstance(c[k], list): c_in[k] = [torch.cat([ unconditional_conditioning[k][i], c[k][i]]) for i in range(len(c[k]))] else: c_in[k] = torch.cat([ unconditional_conditioning[k], c[k]]) else: c_in = torch.cat([unconditional_conditioning, c]) e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2) e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond) if score_corrector is not None: assert self.model.parameterization == "eps" e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs) alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas # select parameters corresponding to the currently considered timestep a_t = torch.full((b, 1, 1, 1), alphas[index], device=device) a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device) sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device) sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device) # current prediction for x_0 pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt() print(t, sqrt_one_minus_at, a_t) if quantize_denoised: pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0) if dynamic_threshold is not None: pred_x0 = norm_thresholding(pred_x0, dynamic_threshold) # direction pointing to x_t dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature if noise_dropout > 0.: noise = torch.nn.functional.dropout(noise, p=noise_dropout) x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise return x_prev, pred_x0 @torch.no_grad() def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None, unconditional_guidance_scale=1.0, unconditional_conditioning=None): num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0] assert t_enc <= num_reference_steps num_steps = t_enc if use_original_steps: alphas_next = self.alphas_cumprod[:num_steps] alphas = self.alphas_cumprod_prev[:num_steps] else: alphas_next = self.ddim_alphas[:num_steps] alphas = torch.tensor(self.ddim_alphas_prev[:num_steps]) x_next = x0 intermediates = [] inter_steps = [] for i in tqdm(range(num_steps), desc='Encoding Image'): t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long) if unconditional_guidance_scale == 1.: noise_pred = self.model.apply_model(x_next, t, c) else: assert unconditional_conditioning is not None e_t_uncond, noise_pred = torch.chunk( self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)), torch.cat((unconditional_conditioning, c))), 2) noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond) xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next weighted_noise_pred = alphas_next[i].sqrt() * ( (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred x_next = xt_weighted + weighted_noise_pred if return_intermediates and i % ( num_steps // return_intermediates) == 0 and i < num_steps - 1: intermediates.append(x_next) inter_steps.append(i) elif return_intermediates and i >= num_steps - 2: intermediates.append(x_next) inter_steps.append(i) out = {'x_encoded': x_next, 'intermediate_steps': inter_steps} if return_intermediates: out.update({'intermediates': intermediates}) return x_next, out @torch.no_grad() def stochastic_encode(self, x0, t, use_original_steps=False, noise=None): # fast, but does not allow for exact reconstruction # t serves as an index to gather the correct alphas if use_original_steps: sqrt_alphas_cumprod = self.sqrt_alphas_cumprod sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod else: sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas) sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas if noise is None: noise = torch.randn_like(x0)
"""SAMPLING ONLY.""" class DDIMSampler(object): def __init__(self, model, schedule="linear", **kwargs): super().__init__() self.model = model self.ddpm_num_timesteps = model.num_timesteps self.schedule = schedule def to(self, device): """Same as to in torch module Don't really underestand why this isn't a module in the first place""" for k, v in self.__dict__.items(): if isinstance(v, torch.Tensor): new_v = getattr(self, k).to(device) setattr(self, k, new_v) def register_buffer(self, name, attr): if type(attr) == torch.Tensor: if attr.device != torch.device("cuda"): attr = attr.to(torch.device("cuda")) setattr(self, name, attr) def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True): self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps, num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose) alphas_cumprod = self.model.alphas_cumprod assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep' to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device) self.register_buffer('betas', to_torch(self.model.betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu()))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu()))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu()))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu()))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1))) # ddim sampling parameters ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(), ddim_timesteps=self.ddim_timesteps, eta=ddim_eta,verbose=verbose) self.register_buffer('ddim_sigmas', ddim_sigmas) self.register_buffer('ddim_alphas', ddim_alphas) self.register_buffer('ddim_alphas_prev', ddim_alphas_prev) self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas)) sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt( (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * ( 1 - self.alphas_cumprod / self.alphas_cumprod_prev)) self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps) @torch.no_grad() def sample(self, S, batch_size, shape, conditioning=None, callback=None, normals_sequence=None, img_callback=None, quantize_x0=False, eta=0., mask=None, x0=None, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, verbose=True, x_T=None, log_every_t=100, unconditional_guidance_scale=1., unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... 
dynamic_threshold=None, **kwargs ): if conditioning is not None: if isinstance(conditioning, dict): ctmp = conditioning[list(conditioning.keys())[0]] while isinstance(ctmp, list): ctmp = ctmp[0] cbs = ctmp.shape[0] if cbs != batch_size: print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") else: if conditioning.shape[0] != batch_size: print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose) # sampling C, H, W = shape size = (batch_size, C, H, W) # print(f'Data shape for DDIM sampling is {size}, eta {eta}') samples, intermediates = self.ddim_sampling(conditioning, size, callback=callback, img_callback=img_callback, quantize_denoised=quantize_x0, mask=mask, x0=x0, ddim_use_original_steps=False, noise_dropout=noise_dropout, temperature=temperature, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs, x_T=x_T, log_every_t=log_every_t, unconditional_guidance_scale=unconditional_guidance_scale, unconditional_conditioning=unconditional_conditioning, dynamic_threshold=dynamic_threshold, ) return samples, intermediates @torch.no_grad() def ddim_sampling(self, cond, shape, x_T=None, ddim_use_original_steps=False, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, log_every_t=100, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None, t_start=-1): device = self.model.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T if timesteps is None: timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps elif timesteps is not None and not ddim_use_original_steps: subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1 timesteps = self.ddim_timesteps[:subset_end] timesteps = timesteps[:t_start] intermediates = {'x_inter': [img], 'pred_x0': [img]} time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps) total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0] # print(f"Running DDIM Sampling with {total_steps} timesteps") iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps) for i, step in enumerate(iterator): index = total_steps - i - 1 ts = torch.full((b,), step, device=device, dtype=torch.long) if mask is not None: assert x0 is not None img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass? img = img_orig * mask + (1. 
- mask) * img outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps, quantize_denoised=quantize_denoised, temperature=temperature, noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs, unconditional_guidance_scale=unconditional_guidance_scale, unconditional_conditioning=unconditional_conditioning, dynamic_threshold=dynamic_threshold) img, pred_x0 = outs if callback: img = callback(i, img, pred_x0) if img_callback: img_callback(pred_x0, i) if index % log_every_t == 0 or index == total_steps - 1: intermediates['x_inter'].append(img) intermediates['pred_x0'].append(pred_x0) return img, intermediates @torch.no_grad() def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None): b, *_, device = *x.shape, x.device if unconditional_conditioning is None or unconditional_guidance_scale == 1.: e_t = self.model.apply_model(x, t, c) else: x_in = torch.cat([x] * 2) t_in = torch.cat([t] * 2) if isinstance(c, dict): assert isinstance(unconditional_conditioning, dict) c_in = dict() for k in c: if isinstance(c[k], list): c_in[k] = [torch.cat([ unconditional_conditioning[k][i], c[k][i]]) for i in range(len(c[k]))] else: c_in[k] = torch.cat([ unconditional_conditioning[k], c[k]]) else: c_in = torch.cat([unconditional_conditioning, c]) e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2) e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond) if score_corrector is not None: assert self.model.parameterization == "eps" e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs) alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas # select parameters corresponding to the currently considered timestep a_t = torch.full((b, 1, 1, 1), alphas[index], device=device) a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device) sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device) sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device) # current prediction for x_0 pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt() print(t, sqrt_one_minus_at, a_t) if quantize_denoised: pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0) if dynamic_threshold is not None: pred_x0 = norm_thresholding(pred_x0, dynamic_threshold) # direction pointing to x_t dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature if noise_dropout > 0.: noise = torch.nn.functional.dropout(noise, p=noise_dropout) x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise return x_prev, pred_x0 @torch.no_grad() def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None, unconditional_guidance_scale=1.0, unconditional_conditioning=None): num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0] assert t_enc <= num_reference_steps num_steps = t_enc if use_original_steps: alphas_next = self.alphas_cumprod[:num_steps] alphas = self.alphas_cumprod_prev[:num_steps] else: alphas_next = self.ddim_alphas[:num_steps] alphas = torch.tensor(self.ddim_alphas_prev[:num_steps]) x_next = x0 intermediates = [] inter_steps = [] for i in tqdm(range(num_steps), desc='Encoding Image'): t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long) if unconditional_guidance_scale == 1.: noise_pred = self.model.apply_model(x_next, t, c) else: assert unconditional_conditioning is not None e_t_uncond, noise_pred = torch.chunk( self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)), torch.cat((unconditional_conditioning, c))), 2) noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond) xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next weighted_noise_pred = alphas_next[i].sqrt() * ( (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred x_next = xt_weighted + weighted_noise_pred if return_intermediates and i % ( num_steps // return_intermediates) == 0 and i < num_steps - 1: intermediates.append(x_next) inter_steps.append(i) elif return_intermediates and i >= num_steps - 2: intermediates.append(x_next) inter_steps.append(i) out = {'x_encoded': x_next, 'intermediate_steps': inter_steps} if return_intermediates: out.update({'intermediates': intermediates}) return x_next, out @torch.no_grad() def stochastic_encode(self, x0, t, use_original_steps=False, noise=None): # fast, but does not allow for exact reconstruction # t serves as an index to gather the correct alphas if use_original_steps: sqrt_alphas_cumprod = self.sqrt_alphas_cumprod sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod else: sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas) sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas if noise is None: noise = torch.randn_like(x0)
return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +
3
2023-12-14 11:03:35+00:00
8k
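The p_sample_ddim code in the entry above folds classifier-free guidance into a single DDIM update. Below is a minimal sketch of that step, written against per-step scalar alphas rather than the sampler's precomputed buffers; the function names and the deterministic sigma default are illustrative, not from the repo.

import torch

def cfg_eps(e_t_uncond, e_t_cond, guidance_scale):
    # Classifier-free guidance: push the conditional prediction away from the unconditional one.
    return e_t_uncond + guidance_scale * (e_t_cond - e_t_uncond)

def ddim_step(x, e_t, a_t, a_prev, sigma_t=0.0):
    # One DDIM update as in p_sample_ddim above:
    # estimate x_0, take the deterministic direction toward x_t, optionally add sigma_t noise.
    pred_x0 = (x - (1.0 - a_t) ** 0.5 * e_t) / a_t ** 0.5
    dir_xt = (1.0 - a_prev - sigma_t ** 2) ** 0.5 * e_t
    noise = sigma_t * torch.randn_like(x)
    return a_prev ** 0.5 * pred_x0 + dir_xt + noise, pred_x0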
TaoHuang13/diffusion_reward
diffusion_reward/models/video_models/vqdiffusion/engine/solver.py
[ { "identifier": "get_rank", "path": "diffusion_reward/models/video_models/vqdiffusion/distributed/distributed.py", "snippet": "def get_rank():\n if not dist.is_available():\n return 0\n\n if not dist.is_initialized():\n return 0\n\n return dist.get_rank()" }, { "identifier": "is_primary", "path": "diffusion_reward/models/video_models/vqdiffusion/distributed/distributed.py", "snippet": "def is_primary():\n return get_rank() == 0" }, { "identifier": "reduce_dict", "path": "diffusion_reward/models/video_models/vqdiffusion/distributed/distributed.py", "snippet": "def reduce_dict(input_dict, average=True):\n world_size = get_world_size()\n\n if world_size < 2:\n return input_dict\n\n with torch.no_grad():\n keys = []\n values = []\n\n for k in sorted(input_dict.keys()):\n keys.append(k)\n values.append(input_dict[k])\n\n values = torch.stack(values, 0)\n dist.reduce(values, dst=0)\n\n if dist.get_rank() == 0 and average:\n values /= world_size\n\n reduced_dict = {k: v for k, v in zip(keys, values)}\n\n return reduced_dict" }, { "identifier": "EMA", "path": "diffusion_reward/models/video_models/vqdiffusion/engine/ema.py", "snippet": "class EMA(object):\n def __init__(self, \n model, \n decay=0.99, \n update_interval=1,\n device=torch.device('cpu')):\n\n self.decay = decay\n self.update_iterval = update_interval\n self.device = device\n\n self.model = model\n with torch.no_grad():\n if hasattr(model, 'get_ema_model') and callable(model.get_ema_model):\n self.ema_model = copy.deepcopy(model.get_ema_model())\n self.cur_state_dict = model.get_ema_model().state_dict()\n else:\n self.ema_model = copy.deepcopy(model) \n self.cur_state_dict = model.state_dict()\n self.ema_model.to(self.device) \n self.cur_state_dict = {k: v.clone().to(self.device) for k, v in self.cur_state_dict.items()}\n\n def update(self, iteration):\n if (iteration + 1) % self.update_iterval == 0:\n # print('{} Update ema'.format(iteration))\n if hasattr(self.model, 'get_ema_model') and callable(self.model.get_ema_model):\n cur_state_dict = self.model.get_ema_model().state_dict()\n else:\n cur_state_dict = self.model.state_dict()\n\n ema_state_dict = self.ema_model.state_dict()\n for k in ema_state_dict.keys():\n ema_state_dict[k] = ema_state_dict[k] * self.decay + cur_state_dict[k].clone().to(self.device) * (1-self.decay)\n self.ema_model.load_state_dict(ema_state_dict)\n\n def state_dict(self):\n return self.ema_model.state_dict()\n \n def load_state_dict(self, state_dict, strict=True):\n state_dict_ = {k: v.clone().to(self.device) for k, v in state_dict.items()}\n self.ema_model.load_state_dict(state_dict_, strict=strict)\n\n def modify_to_inference(self):\n # get current model\n if hasattr(self.model, 'get_ema_model') and callable(self.model.get_ema_model):\n self.cur_state_dict = self.model.get_ema_model().state_dict()\n else:\n self.cur_state_dict = self.model.state_dict()\n self.cur_state_dict = {k: v.clone().to(self.device) for k, v in self.cur_state_dict.items()}\n\n ema_state_dict = self.ema_model.state_dict()\n ema_state_dict = {k: v.to(self.model.device) for k, v in ema_state_dict.items()}\n if hasattr(self.model, 'get_ema_model') and callable(self.model.get_ema_model):\n self.model.get_ema_model().load_state_dict(ema_state_dict)\n else:\n self.model.load_state_dict(ema_state_dict)\n\n def modify_to_train(self):\n self.cur_state_dict = {k: v.clone().to(self.model.device) for k, v in self.cur_state_dict.items()}\n if hasattr(self.model, 'get_ema_model') and callable(self.model.get_ema_model):\n 
self.model.get_ema_model().load_state_dict(self.cur_state_dict)\n else:\n self.model.load_state_dict(self.cur_state_dict)" }, { "identifier": "ReduceLROnPlateauWithWarmup", "path": "diffusion_reward/models/video_models/vqdiffusion/engine/lr_scheduler.py", "snippet": "class ReduceLROnPlateauWithWarmup(object):\n \"\"\"Reduce learning rate when a metric has stopped improving.\n Models often benefit from reducing the learning rate by a factor\n of 2-10 once learning stagnates. This scheduler reads a metrics\n quantity and if no improvement is seen for a 'patience' number\n of epochs, the learning rate is reduced.\n\n Args:\n optimizer (Optimizer): Wrapped optimizer.\n mode (str): One of `min`, `max`. In `min` mode, lr will\n be reduced when the quantity monitored has stopped\n decreasing; in `max` mode it will be reduced when the\n quantity monitored has stopped increasing. Default: 'min'.\n factor (float): Factor by which the learning rate will be\n reduced. new_lr = lr * factor. Default: 0.1.\n patience (int): Number of epochs with no improvement after\n which learning rate will be reduced. For example, if\n `patience = 2`, then we will ignore the first 2 epochs\n with no improvement, and will only decrease the LR after the\n 3rd epoch if the loss still hasn't improved then.\n Default: 10.\n threshold (float): Threshold for measuring the new optimum,\n to only focus on significant changes. Default: 1e-4.\n threshold_mode (str): One of `rel`, `abs`. In `rel` mode,\n dynamic_threshold = best * ( 1 + threshold ) in 'max'\n mode or best * ( 1 - threshold ) in `min` mode.\n In `abs` mode, dynamic_threshold = best + threshold in\n `max` mode or best - threshold in `min` mode. Default: 'rel'.\n cooldown (int): Number of epochs to wait before resuming\n normal operation after lr has been reduced. Default: 0.\n min_lr (float or list): A scalar or a list of scalars. A\n lower bound on the learning rate of all param groups\n or each group respectively. Default: 0.\n eps (float): Minimal decay applied to lr. If the difference\n between new and old lr is smaller than eps, the update is\n ignored. Default: 1e-8.\n verbose (bool): If ``True``, prints a message to stdout for\n each update. 
Default: ``False``.\n warmup_lr: float or None, the learning rate to be touched after warmup\n warmup: int, the number of steps to warmup\n \"\"\"\n\n def __init__(self, optimizer, mode='min', factor=0.1, patience=10,\n threshold=1e-4, threshold_mode='rel', cooldown=0,\n min_lr=0, eps=1e-8, verbose=False, warmup_lr=None,\n warmup=0):\n\n if factor >= 1.0:\n raise ValueError('Factor should be < 1.0.')\n self.factor = factor\n\n # Attach optimizer\n if not isinstance(optimizer, Optimizer):\n raise TypeError('{} is not an Optimizer'.format(\n type(optimizer).__name__))\n self.optimizer = optimizer\n\n if isinstance(min_lr, list) or isinstance(min_lr, tuple):\n if len(min_lr) != len(optimizer.param_groups):\n raise ValueError(\"expected {} min_lrs, got {}\".format(\n len(optimizer.param_groups), len(min_lr)))\n self.min_lrs = list(min_lr)\n else:\n self.min_lrs = [min_lr] * len(optimizer.param_groups)\n\n self.patience = patience\n self.verbose = verbose\n self.cooldown = cooldown\n self.cooldown_counter = 0\n self.mode = mode\n self.threshold = threshold\n self.threshold_mode = threshold_mode\n\n self.warmup_lr = warmup_lr\n self.warmup = warmup\n \n\n self.best = None\n self.num_bad_epochs = None\n self.mode_worse = None # the worse value for the chosen mode\n self.eps = eps\n self.last_epoch = 0\n self._init_is_better(mode=mode, threshold=threshold,\n threshold_mode=threshold_mode)\n self._reset()\n\n def _prepare_for_warmup(self):\n if self.warmup_lr is not None:\n if isinstance(self.warmup_lr, (list, tuple)):\n if len(self.warmup_lr) != len(self.optimizer.param_groups):\n raise ValueError(\"expected {} warmup_lrs, got {}\".format(\n len(self.optimizer.param_groups), len(self.warmup_lr)))\n self.warmup_lrs = list(self.warmup_lr)\n else:\n self.warmup_lrs = [self.warmup_lr] * len(self.optimizer.param_groups)\n else:\n self.warmup_lrs = None\n if self.warmup > self.last_epoch:\n curr_lrs = [group['lr'] for group in self.optimizer.param_groups]\n self.warmup_lr_steps = [max(0, (self.warmup_lrs[i] - curr_lrs[i])/float(self.warmup)) for i in range(len(curr_lrs))]\n else:\n self.warmup_lr_steps = None\n\n def _reset(self):\n \"\"\"Resets num_bad_epochs counter and cooldown counter.\"\"\"\n self.best = self.mode_worse\n self.cooldown_counter = 0\n self.num_bad_epochs = 0\n\n def step(self, metrics):\n # convert `metrics` to float, in case it's a zero-dim Tensor\n current = float(metrics)\n epoch = self.last_epoch + 1\n self.last_epoch = epoch\n\n if epoch <= self.warmup:\n self._increase_lr(epoch)\n else:\n if self.is_better(current, self.best):\n self.best = current\n self.num_bad_epochs = 0\n else:\n self.num_bad_epochs += 1\n\n if self.in_cooldown:\n self.cooldown_counter -= 1\n self.num_bad_epochs = 0 # ignore any bad epochs in cooldown\n\n if self.num_bad_epochs > self.patience:\n self._reduce_lr(epoch)\n self.cooldown_counter = self.cooldown\n self.num_bad_epochs = 0\n\n self._last_lr = [group['lr'] for group in self.optimizer.param_groups]\n\n def _reduce_lr(self, epoch):\n for i, param_group in enumerate(self.optimizer.param_groups):\n old_lr = float(param_group['lr'])\n new_lr = max(old_lr * self.factor, self.min_lrs[i])\n if old_lr - new_lr > self.eps:\n param_group['lr'] = new_lr\n if self.verbose:\n print('Epoch {:5d}: reducing learning rate'\n ' of group {} to {:.4e}.'.format(epoch, i, new_lr))\n\n def _increase_lr(self, epoch):\n # used for warmup\n for i, param_group in enumerate(self.optimizer.param_groups):\n old_lr = float(param_group['lr'])\n new_lr = max(old_lr + 
self.warmup_lr_steps[i], self.min_lrs[i])\n param_group['lr'] = new_lr\n if self.verbose:\n print('Epoch {:5d}: increasing learning rate'\n ' of group {} to {:.4e}.'.format(epoch, i, new_lr))\n\n @property\n def in_cooldown(self):\n return self.cooldown_counter > 0\n\n def is_better(self, a, best):\n if self.mode == 'min' and self.threshold_mode == 'rel':\n rel_epsilon = 1. - self.threshold\n return a < best * rel_epsilon\n\n elif self.mode == 'min' and self.threshold_mode == 'abs':\n return a < best - self.threshold\n\n elif self.mode == 'max' and self.threshold_mode == 'rel':\n rel_epsilon = self.threshold + 1.\n return a > best * rel_epsilon\n\n else: # mode == 'max' and epsilon_mode == 'abs':\n return a > best + self.threshold\n\n def _init_is_better(self, mode, threshold, threshold_mode):\n if mode not in {'min', 'max'}:\n raise ValueError('mode ' + mode + ' is unknown!')\n if threshold_mode not in {'rel', 'abs'}:\n raise ValueError('threshold mode ' + threshold_mode + ' is unknown!')\n\n if mode == 'min':\n self.mode_worse = np.inf\n else: # mode == 'max':\n self.mode_worse = -np.inf\n\n self.mode = mode\n self.threshold = threshold\n self.threshold_mode = threshold_mode\n\n self._prepare_for_warmup()\n\n def state_dict(self):\n return {key: value for key, value in self.__dict__.items() if key != 'optimizer'}\n\n def load_state_dict(self, state_dict):\n self.__dict__.update(state_dict)\n self._init_is_better(mode=self.mode, threshold=self.threshold, threshold_mode=self.threshold_mode)" }, { "identifier": "format_seconds", "path": "diffusion_reward/models/video_models/vqdiffusion/utils/misc.py", "snippet": "def format_seconds(seconds):\n h = int(seconds // 3600)\n m = int(seconds // 60 - h * 60)\n s = int(seconds % 60)\n\n d = int(h // 24)\n h = h - d * 24\n\n if d == 0:\n if h == 0:\n if m == 0:\n ft = '{:02d}s'.format(s)\n else:\n ft = '{:02d}m:{:02d}s'.format(m, s)\n else:\n ft = '{:02d}h:{:02d}m:{:02d}s'.format(h, m, s)\n \n else:\n ft = '{:d}d:{:02d}h:{:02d}m:{:02d}s'.format(d, h, m, s)\n\n return ft" }, { "identifier": "get_model_parameters_info", "path": "diffusion_reward/models/video_models/vqdiffusion/utils/misc.py", "snippet": "def get_model_parameters_info(model):\n # for mn, m in model.named_modules():\n parameters = {'overall': {'trainable': 0, 'non_trainable': 0, 'total': 0}}\n for child_name, child_module in model.named_children():\n parameters[child_name] = {'trainable': 0, 'non_trainable': 0}\n for pn, p in child_module.named_parameters():\n if p.requires_grad:\n parameters[child_name]['trainable'] += p.numel()\n else:\n parameters[child_name]['non_trainable'] += p.numel()\n parameters[child_name]['total'] = parameters[child_name]['trainable'] + parameters[child_name]['non_trainable']\n \n parameters['overall']['trainable'] += parameters[child_name]['trainable']\n parameters['overall']['non_trainable'] += parameters[child_name]['non_trainable']\n parameters['overall']['total'] += parameters[child_name]['total']\n \n # format the numbers\n def format_number(num):\n K = 2**10\n M = 2**20\n G = 2**30\n if num > G: # K\n uint = 'G'\n num = round(float(num)/G, 2)\n elif num > M:\n uint = 'M'\n num = round(float(num)/M, 2)\n elif num > K:\n uint = 'K'\n num = round(float(num)/K, 2)\n else:\n uint = ''\n \n return '{}{}'.format(num, uint)\n \n def format_dict(d):\n for k, v in d.items():\n if isinstance(v, dict):\n format_dict(v)\n else:\n d[k] = format_number(v)\n \n format_dict(parameters)\n return parameters" }, { "identifier": "instantiate_from_config", "path": 
"diffusion_reward/models/video_models/vqdiffusion/utils/misc.py", "snippet": "def instantiate_from_config(config):\n if config is None:\n return None\n if not \"target\" in config:\n raise KeyError(\"Expected key `target` to instantiate.\")\n module, cls = config[\"target\"].rsplit(\".\", 1)\n cls = getattr(importlib.import_module(module, package=None), cls)\n return cls(**config.get(\"params\", dict()))" } ]
import copy
import math
import os
import time
import torch
import torchvision
import matplotlib
import matplotlib.pyplot as plt
from omegaconf import OmegaConf
from PIL import Image
from torch.optim.lr_scheduler import ReduceLROnPlateau
from ..distributed.distributed import get_rank, is_primary, reduce_dict
from ..engine.ema import EMA
from ..engine.lr_scheduler import ReduceLROnPlateauWithWarmup
from ..utils.misc import (format_seconds, get_model_parameters_info, instantiate_from_config)
from torch.cuda.amp import GradScaler, autocast
5966
if 'ema' in config['solver'] and args.local_rank == 0: ema_args = config['solver']['ema'] ema_args = OmegaConf.to_container(copy.deepcopy(ema_args), resolve=True) ema_args['model'] = self.model self.ema = EMA(**ema_args) else: self.ema = None self.logger.log_info(str(get_model_parameters_info(self.model))) self.model.cuda() self.device = self.model.device if self.args.distributed: self.logger.log_info('Distributed, begin DDP the model...') self.model = torch.nn.parallel.DistributedDataParallel(self.model, device_ids=[self.args.gpu], find_unused_parameters=False) self.logger.log_info('Distributed, DDP model done!') # prepare for amp self.args.amp = self.args.amp and AMP if self.args.amp: self.scaler = GradScaler() self.logger.log_info('Using AMP for training!') self.logger.log_info("{}: global rank {}: prepare solver done!".format(self.args.exp_name,self.args.global_rank), check_primary=False) self.best_loss = float('inf') def _get_optimizer_and_scheduler(self, op_sc_list): optimizer_and_scheduler = {} for op_sc_cfg in op_sc_list: op_sc = { 'name': op_sc_cfg.get('name', 'none'), 'start_epoch': op_sc_cfg.get('start_epoch', 0), 'end_epoch': op_sc_cfg.get('end_epoch', -1), 'start_iteration': op_sc_cfg.get('start_iteration', 0), 'end_iteration': op_sc_cfg.get('end_iteration', -1), } if op_sc['name'] == 'none': # parameters = self.model.parameters() parameters = filter(lambda p: p.requires_grad, self.model.parameters()) else: # NOTE: get the parameters with the given name, the parameters() should be overide parameters = self.model.parameters(name=op_sc['name']) # build optimizer op_cfg = op_sc_cfg.get('optimizer', {'target': 'torch.optim.SGD', 'params': {}}) op_cfg = OmegaConf.to_container(copy.deepcopy(op_cfg), resolve=True) if 'params' not in op_cfg: op_cfg['params'] = {} if 'lr' not in op_cfg['params']: op_cfg['params']['lr'] = self.lr op_cfg['params']['params'] = parameters optimizer = instantiate_from_config(op_cfg) op_sc['optimizer'] = { 'module': optimizer, 'step_iteration': op_cfg.get('step_iteration', 1) } assert isinstance(op_sc['optimizer']['step_iteration'], int), 'optimizer steps should be a integer number of iterations' # build scheduler if 'scheduler' in op_sc_cfg: sc_cfg = OmegaConf.to_container(copy.deepcopy(op_sc_cfg['scheduler']), resolve=True) sc_cfg['params']['optimizer'] = optimizer # for cosine annealing lr, compute T_max if sc_cfg['target'].split('.')[-1] in ['CosineAnnealingLRWithWarmup', 'CosineAnnealingLR']: T_max = self.max_epochs * self.dataloader['train_iterations'] sc_cfg['params']['T_max'] = T_max scheduler = instantiate_from_config(sc_cfg) op_sc['scheduler'] = { 'module': scheduler, 'step_iteration': sc_cfg.get('step_iteration', 1) } if op_sc['scheduler']['step_iteration'] == 'epoch': op_sc['scheduler']['step_iteration'] = self.dataloader['train_iterations'] optimizer_and_scheduler[op_sc['name']] = op_sc return optimizer_and_scheduler def _get_lr(self, return_type='str'): lrs = {} for op_sc_n, op_sc in self.optimizer_and_scheduler.items(): lr = op_sc['optimizer']['module'].state_dict()['param_groups'][0]['lr'] lrs[op_sc_n+'_lr'] = round(lr, 10) if return_type == 'str': lrs = str(lrs) lrs = lrs.replace('none', 'lr').replace('{', '').replace('}','').replace('\'', '') elif return_type == 'dict': pass else: raise ValueError('Unknow of return type: {}'.format(return_type)) return lrs def sample(self, batch, phase='train', step_type='iteration'): tic = time.time() self.logger.log_info('Begin to sample...') if self.ema is not None: self.ema.modify_to_inference() suffix 
= '_ema' else: suffix = '' if isinstance(self.model, torch.nn.parallel.DistributedDataParallel): model = self.model.module else: model = self.model with torch.no_grad(): if self.debug == False: if self.args.amp: with autocast(): samples = model.sample(batch=batch, step=self.last_iter) else: samples = model.sample(batch=batch, step=self.last_iter) else: samples = model.sample(batch=batch[0].cuda(), step=self.last_iter) step = self.last_iter if step_type == 'iteration' else self.last_epoch for k, v in samples.items(): save_dir = os.path.join(self.image_dir, phase, k) os.makedirs(save_dir, exist_ok=True)
# ------------------------------------------ # VQ-Diffusion # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. # written By Shuyang Gu # ------------------------------------------ try: AMP = True except: print('Warning: import torch.amp failed, so no amp will be used!') AMP = False matplotlib.use('Agg') STEP_WITH_LOSS_SCHEDULERS = (ReduceLROnPlateauWithWarmup, ReduceLROnPlateau) class Solver(object): def __init__(self, config, args, model, dataloader, logger): self.config = config self.args = args self.model = model self.dataloader = dataloader self.logger = logger self.max_epochs = config['solver']['max_epochs'] self.save_epochs = config['solver']['save_epochs'] self.save_iterations = config['solver'].get('save_iterations', -1) self.sample_iterations = config['solver']['sample_iterations'] if self.sample_iterations == 'epoch': self.sample_iterations = self.dataloader['train_iterations'] self.validation_epochs = config['solver'].get('validation_epochs', 2) assert isinstance(self.save_epochs, (int, list)) assert isinstance(self.validation_epochs, (int, list)) self.debug = config['solver'].get('debug', False) self.last_epoch = -1 self.last_iter = -1 # self.ckpt_dir = os.path.join(args.save_dir, 'checkpoint') # self.image_dir = os.path.join(args.save_dir, 'images') self.ckpt_dir = "checkpoint" self.image_dir = "images" os.makedirs(self.ckpt_dir, exist_ok=True) os.makedirs(self.image_dir, exist_ok=True) # get grad_clipper if 'clip_grad_norm' in config['solver']: self.clip_grad_norm = instantiate_from_config(config['solver']['clip_grad_norm']) else: self.clip_grad_norm = None # get lr adjust_lr = config['solver'].get('adjust_lr', 'sqrt') base_lr = config['solver'].get('base_lr', 1.0e-4) if adjust_lr == 'none': self.lr = base_lr elif adjust_lr == 'sqrt': self.lr = base_lr * math.sqrt(args.world_size * config['dataloader']['batch_size']) elif adjust_lr == 'linear': self.lr = base_lr * args.world_size * config['dataloader']['batch_size'] else: raise NotImplementedError('Unknown type of adjust lr {}!'.format(adjust_lr)) self.logger.log_info('Get lr {} from base lr {} with {}'.format(self.lr, base_lr, adjust_lr)) if hasattr(model, 'get_optimizer_and_scheduler') and callable(getattr(model, 'get_optimizer_and_scheduler')): optimizer_and_scheduler = model.get_optimizer_and_scheduler(config['solver']['optimizers_and_schedulers']) else: optimizer_and_scheduler = self._get_optimizer_and_scheduler(config['solver']['optimizers_and_schedulers']) assert type(optimizer_and_scheduler) == type({}), 'optimizer and schduler should be a dict!' 
self.optimizer_and_scheduler = optimizer_and_scheduler # configre for ema if 'ema' in config['solver'] and args.local_rank == 0: ema_args = config['solver']['ema'] ema_args = OmegaConf.to_container(copy.deepcopy(ema_args), resolve=True) ema_args['model'] = self.model self.ema = EMA(**ema_args) else: self.ema = None self.logger.log_info(str(get_model_parameters_info(self.model))) self.model.cuda() self.device = self.model.device if self.args.distributed: self.logger.log_info('Distributed, begin DDP the model...') self.model = torch.nn.parallel.DistributedDataParallel(self.model, device_ids=[self.args.gpu], find_unused_parameters=False) self.logger.log_info('Distributed, DDP model done!') # prepare for amp self.args.amp = self.args.amp and AMP if self.args.amp: self.scaler = GradScaler() self.logger.log_info('Using AMP for training!') self.logger.log_info("{}: global rank {}: prepare solver done!".format(self.args.exp_name,self.args.global_rank), check_primary=False) self.best_loss = float('inf') def _get_optimizer_and_scheduler(self, op_sc_list): optimizer_and_scheduler = {} for op_sc_cfg in op_sc_list: op_sc = { 'name': op_sc_cfg.get('name', 'none'), 'start_epoch': op_sc_cfg.get('start_epoch', 0), 'end_epoch': op_sc_cfg.get('end_epoch', -1), 'start_iteration': op_sc_cfg.get('start_iteration', 0), 'end_iteration': op_sc_cfg.get('end_iteration', -1), } if op_sc['name'] == 'none': # parameters = self.model.parameters() parameters = filter(lambda p: p.requires_grad, self.model.parameters()) else: # NOTE: get the parameters with the given name, the parameters() should be overide parameters = self.model.parameters(name=op_sc['name']) # build optimizer op_cfg = op_sc_cfg.get('optimizer', {'target': 'torch.optim.SGD', 'params': {}}) op_cfg = OmegaConf.to_container(copy.deepcopy(op_cfg), resolve=True) if 'params' not in op_cfg: op_cfg['params'] = {} if 'lr' not in op_cfg['params']: op_cfg['params']['lr'] = self.lr op_cfg['params']['params'] = parameters optimizer = instantiate_from_config(op_cfg) op_sc['optimizer'] = { 'module': optimizer, 'step_iteration': op_cfg.get('step_iteration', 1) } assert isinstance(op_sc['optimizer']['step_iteration'], int), 'optimizer steps should be a integer number of iterations' # build scheduler if 'scheduler' in op_sc_cfg: sc_cfg = OmegaConf.to_container(copy.deepcopy(op_sc_cfg['scheduler']), resolve=True) sc_cfg['params']['optimizer'] = optimizer # for cosine annealing lr, compute T_max if sc_cfg['target'].split('.')[-1] in ['CosineAnnealingLRWithWarmup', 'CosineAnnealingLR']: T_max = self.max_epochs * self.dataloader['train_iterations'] sc_cfg['params']['T_max'] = T_max scheduler = instantiate_from_config(sc_cfg) op_sc['scheduler'] = { 'module': scheduler, 'step_iteration': sc_cfg.get('step_iteration', 1) } if op_sc['scheduler']['step_iteration'] == 'epoch': op_sc['scheduler']['step_iteration'] = self.dataloader['train_iterations'] optimizer_and_scheduler[op_sc['name']] = op_sc return optimizer_and_scheduler def _get_lr(self, return_type='str'): lrs = {} for op_sc_n, op_sc in self.optimizer_and_scheduler.items(): lr = op_sc['optimizer']['module'].state_dict()['param_groups'][0]['lr'] lrs[op_sc_n+'_lr'] = round(lr, 10) if return_type == 'str': lrs = str(lrs) lrs = lrs.replace('none', 'lr').replace('{', '').replace('}','').replace('\'', '') elif return_type == 'dict': pass else: raise ValueError('Unknow of return type: {}'.format(return_type)) return lrs def sample(self, batch, phase='train', step_type='iteration'): tic = time.time() self.logger.log_info('Begin to 
sample...') if self.ema is not None: self.ema.modify_to_inference() suffix = '_ema' else: suffix = '' if isinstance(self.model, torch.nn.parallel.DistributedDataParallel): model = self.model.module else: model = self.model with torch.no_grad(): if self.debug == False: if self.args.amp: with autocast(): samples = model.sample(batch=batch, step=self.last_iter) else: samples = model.sample(batch=batch, step=self.last_iter) else: samples = model.sample(batch=batch[0].cuda(), step=self.last_iter) step = self.last_iter if step_type == 'iteration' else self.last_epoch for k, v in samples.items(): save_dir = os.path.join(self.image_dir, phase, k) os.makedirs(save_dir, exist_ok=True)
save_path = os.path.join(save_dir, 'e{:010d}_itr{:010d}_rank{}{}'.format(self.last_epoch, self.last_iter%self.dataloader['train_iterations'], get_rank(), suffix))
0
2023-12-05 02:42:28+00:00
8k
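The EMA helper in this entry's context blends the live model weights into a shadow copy on a fixed update interval. A minimal sketch of that exponential moving average over a plain state_dict follows; the decay value is illustrative.

import torch

def ema_update(ema_state, model_state, decay=0.99):
    # Shadow weights drift toward the live weights: ema <- decay * ema + (1 - decay) * current,
    # mirroring the per-key update loop in the EMA class shown above.
    with torch.no_grad():
        for k in ema_state:
            ema_state[k] = ema_state[k] * decay + model_state[k] * (1.0 - decay)
    return ema_state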
mkang315/ASF-YOLO
models/yolo.py
[ { "identifier": "check_anchor_order", "path": "utils/autoanchor.py", "snippet": "def check_anchor_order(m):\n # Check anchor order against stride order for YOLOv5 Detect() module m, and correct if necessary\n a = m.anchors.prod(-1).mean(-1).view(-1) # mean anchor area per output layer\n da = a[-1] - a[0] # delta a\n ds = m.stride[-1] - m.stride[0] # delta s\n if da and (da.sign() != ds.sign()): # same order\n LOGGER.info(f'{PREFIX}Reversing anchor order')\n m.anchors[:] = m.anchors.flip(0)" }, { "identifier": "LOGGER", "path": "utils/general.py", "snippet": "LOGGER = logging.getLogger(LOGGING_NAME) # define globally (used in train.py, val.py, detect.py, etc.)" }, { "identifier": "check_version", "path": "utils/general.py", "snippet": "def check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False, hard=False, verbose=False):\n # Check version vs. required version\n current, minimum = (pkg.parse_version(x) for x in (current, minimum))\n result = (current == minimum) if pinned else (current >= minimum) # bool\n s = f'WARNING ⚠️ {name}{minimum} is required by YOLOv5, but {name}{current} is currently installed' # string\n if hard:\n assert result, emojis(s) # assert min requirements met\n if verbose and not result:\n LOGGER.warning(s)\n return result" }, { "identifier": "check_yaml", "path": "utils/general.py", "snippet": "def check_yaml(file, suffix=('.yaml', '.yml')):\n # Search/download YAML file (if necessary) and return path, checking suffix\n return check_file(file, suffix)" }, { "identifier": "make_divisible", "path": "utils/general.py", "snippet": "def make_divisible(x, divisor):\n # Returns nearest x divisible by divisor\n if isinstance(divisor, torch.Tensor):\n divisor = int(divisor.max()) # to int\n return math.ceil(x / divisor) * divisor" }, { "identifier": "print_args", "path": "utils/general.py", "snippet": "def print_args(args: Optional[dict] = None, show_file=True, show_func=False):\n # Print function arguments (optional args dict)\n x = inspect.currentframe().f_back # previous frame\n file, _, func, _, _ = inspect.getframeinfo(x)\n if args is None: # get args automatically\n args, _, _, frm = inspect.getargvalues(x)\n args = {k: v for k, v in frm.items() if k in args}\n try:\n file = Path(file).resolve().relative_to(ROOT).with_suffix('')\n except ValueError:\n file = Path(file).stem\n s = (f'{file}: ' if show_file else '') + (f'{func}: ' if show_func else '')\n LOGGER.info(colorstr(s) + ', '.join(f'{k}={v}' for k, v in args.items()))" }, { "identifier": "feature_visualization", "path": "utils/plots.py", "snippet": "def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')):\n \"\"\"\n x: Features to be visualized\n module_type: Module type\n stage: Module stage within model\n n: Maximum number of feature maps to plot\n save_dir: Directory to save results\n \"\"\"\n if 'Detect' not in module_type:\n batch, channels, height, width = x.shape # batch, channels, height, width\n if height > 1 and width > 1:\n f = save_dir / f\"stage{stage}_{module_type.split('.')[-1]}_features.png\" # filename\n\n blocks = torch.chunk(x[0].cpu(), channels, dim=0) # select batch index 0, block by channels\n n = min(n, channels) # number of plots\n fig, ax = plt.subplots(math.ceil(n / 8), 8, tight_layout=True) # 8 rows x n/8 cols\n ax = ax.ravel()\n plt.subplots_adjust(wspace=0.05, hspace=0.05)\n for i in range(n):\n ax[i].imshow(blocks[i].squeeze()) # cmap='gray'\n ax[i].axis('off')\n\n LOGGER.info(f'Saving {f}... 
({n}/{channels})')\n plt.savefig(f, dpi=300, bbox_inches='tight')\n plt.close()\n np.save(str(f.with_suffix('.npy')), x[0].cpu().numpy()) # npy save" }, { "identifier": "fuse_conv_and_bn", "path": "utils/torch_utils.py", "snippet": "def fuse_conv_and_bn(conv, bn):\n # Fuse Conv2d() and BatchNorm2d() layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/\n fusedconv = nn.Conv2d(conv.in_channels,\n conv.out_channels,\n kernel_size=conv.kernel_size,\n stride=conv.stride,\n padding=conv.padding,\n dilation=conv.dilation,\n groups=conv.groups,\n bias=True).requires_grad_(False).to(conv.weight.device)\n\n # Prepare filters\n w_conv = conv.weight.clone().view(conv.out_channels, -1)\n w_bn = torch.diag(bn.weight.div(torch.sqrt(bn.eps + bn.running_var)))\n fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape))\n\n # Prepare spatial bias\n b_conv = torch.zeros(conv.weight.size(0), device=conv.weight.device) if conv.bias is None else conv.bias\n b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))\n fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)\n\n return fusedconv" }, { "identifier": "initialize_weights", "path": "utils/torch_utils.py", "snippet": "def initialize_weights(model):\n for m in model.modules():\n t = type(m)\n if t is nn.Conv2d:\n pass # nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif t is nn.BatchNorm2d:\n m.eps = 1e-3\n m.momentum = 0.03\n elif t in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]:\n m.inplace = True" }, { "identifier": "model_info", "path": "utils/torch_utils.py", "snippet": "def model_info(model, verbose=False, imgsz=640):\n # Model information. img_size may be int or list, i.e. img_size=640 or img_size=[640, 320]\n n_p = sum(x.numel() for x in model.parameters()) # number parameters\n n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients\n if verbose:\n print(f\"{'layer':>5} {'name':>40} {'gradient':>9} {'parameters':>12} {'shape':>20} {'mu':>10} {'sigma':>10}\")\n for i, (name, p) in enumerate(model.named_parameters()):\n name = name.replace('module_list.', '')\n print('%5g %40s %9s %12g %20s %10.3g %10.3g' %\n (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()))\n\n try: # FLOPs\n p = next(model.parameters())\n stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32 # max stride\n im = torch.empty((1, p.shape[1], stride, stride), device=p.device) # input image in BCHW format\n flops = thop.profile(deepcopy(model), inputs=(im,), verbose=False)[0] / 1E9 * 2 # stride GFLOPs\n imgsz = imgsz if isinstance(imgsz, list) else [imgsz, imgsz] # expand if int/float\n fs = f', {flops * imgsz[0] / stride * imgsz[1] / stride:.1f} GFLOPs' # 640x640 GFLOPs\n except Exception:\n fs = ''\n\n name = Path(model.yaml_file).stem.replace('yolov5', 'YOLOv5') if hasattr(model, 'yaml_file') else 'Model'\n LOGGER.info(f\"{name} summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}\")" }, { "identifier": "profile", "path": "utils/torch_utils.py", "snippet": "def profile(input, ops, n=10, device=None):\n \"\"\" YOLOv5 speed/memory/FLOPs profiler\n Usage:\n input = torch.randn(16, 3, 640, 640)\n m1 = lambda x: x * torch.sigmoid(x)\n m2 = nn.SiLU()\n profile(input, [m1, m2], n=100) # profile over 100 iterations\n \"\"\"\n results = []\n if not isinstance(device, torch.device):\n device = select_device(device)\n print(f\"{'Params':>12s}{'GFLOPs':>12s}{'GPU_mem 
(GB)':>14s}{'forward (ms)':>14s}{'backward (ms)':>14s}\"\n f\"{'input':>24s}{'output':>24s}\")\n\n for x in input if isinstance(input, list) else [input]:\n x = x.to(device)\n x.requires_grad = True\n for m in ops if isinstance(ops, list) else [ops]:\n m = m.to(device) if hasattr(m, 'to') else m # device\n m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m\n tf, tb, t = 0, 0, [0, 0, 0] # dt forward, backward\n try:\n flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 # GFLOPs\n except Exception:\n flops = 0\n\n try:\n for _ in range(n):\n t[0] = time_sync()\n y = m(x)\n t[1] = time_sync()\n try:\n _ = (sum(yi.sum() for yi in y) if isinstance(y, list) else y).sum().backward()\n t[2] = time_sync()\n except Exception: # no backward method\n # print(e) # for debug\n t[2] = float('nan')\n tf += (t[1] - t[0]) * 1000 / n # ms per op forward\n tb += (t[2] - t[1]) * 1000 / n # ms per op backward\n mem = torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0 # (GB)\n s_in, s_out = (tuple(x.shape) if isinstance(x, torch.Tensor) else 'list' for x in (x, y)) # shapes\n p = sum(x.numel() for x in m.parameters()) if isinstance(m, nn.Module) else 0 # parameters\n print(f'{p:12}{flops:12.4g}{mem:>14.3f}{tf:14.4g}{tb:14.4g}{str(s_in):>24s}{str(s_out):>24s}')\n results.append([p, flops, mem, tf, tb, s_in, s_out])\n except Exception as e:\n print(e)\n results.append(None)\n torch.cuda.empty_cache()\n return results" }, { "identifier": "scale_img", "path": "utils/torch_utils.py", "snippet": "def scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416)\n # Scales img(bs,3,y,x) by ratio constrained to gs-multiple\n if ratio == 1.0:\n return img\n h, w = img.shape[2:]\n s = (int(h * ratio), int(w * ratio)) # new size\n img = F.interpolate(img, size=s, mode='bilinear', align_corners=False) # resize\n if not same_shape: # pad/crop img\n h, w = (math.ceil(x * ratio / gs) * gs for x in (h, w))\n return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean" }, { "identifier": "select_device", "path": "utils/torch_utils.py", "snippet": "def select_device(device='', batch_size=0, newline=True):\n # device = None or 'cpu' or 0 or '0' or '0,1,2,3'\n s = f'YOLOv5 🚀 {git_describe() or file_date()} Python-{platform.python_version()} torch-{torch.__version__} '\n device = str(device).strip().lower().replace('cuda:', '').replace('none', '') # to string, 'cuda:0' to '0'\n cpu = device == 'cpu'\n mps = device == 'mps' # Apple Metal Performance Shaders (MPS)\n if cpu or mps:\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False\n elif device: # non-cpu device requested\n os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable - must be before assert is_available()\n assert torch.cuda.is_available() and torch.cuda.device_count() >= len(device.replace(',', '')), \\\n f\"Invalid CUDA '--device {device}' requested, use '--device cpu' or pass valid CUDA device(s)\"\n\n if not cpu and not mps and torch.cuda.is_available(): # prefer GPU if available\n devices = device.split(',') if device else '0' # range(torch.cuda.device_count()) # i.e. 
0,1,6,7\n n = len(devices) # device count\n if n > 1 and batch_size > 0: # check batch_size is divisible by device_count\n assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}'\n space = ' ' * (len(s) + 1)\n for i, d in enumerate(devices):\n p = torch.cuda.get_device_properties(i)\n s += f\"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / (1 << 20):.0f}MiB)\\n\" # bytes to MB\n arg = 'cuda:0'\n elif mps and getattr(torch, 'has_mps', False) and torch.backends.mps.is_available(): # prefer MPS if available\n s += 'MPS\\n'\n arg = 'mps'\n else: # revert to CPU\n s += 'CPU\\n'\n arg = 'cpu'\n\n if not newline:\n s = s.rstrip()\n LOGGER.info(s)\n return torch.device(arg)" }, { "identifier": "time_sync", "path": "utils/torch_utils.py", "snippet": "def time_sync():\n # PyTorch-accurate time\n if torch.cuda.is_available():\n torch.cuda.synchronize()\n return time.time()" } ]
import argparse
import contextlib
import os
import platform
import sys
import thop  # for FLOPs computation
import yaml  # for torch hub
from copy import deepcopy
from pathlib import Path
from models.common import *
from models.experimental import *
from utils.autoanchor import check_anchor_order
from utils.general import LOGGER, check_version, check_yaml, make_divisible, print_args
from utils.plots import feature_visualization
from utils.torch_utils import (fuse_conv_and_bn, initialize_weights, model_info, profile, scale_img, select_device, time_sync)
5178
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ YOLO-specific modules Usage: $ python models/yolo.py --cfg yolov5s.yaml """ FILE = Path(__file__).resolve() ROOT = FILE.parents[1] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH if platform.system() != 'Windows': ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative try: except ImportError: thop = None class Detect(nn.Module): # YOLOv5 Detect head for detection models stride = None # strides computed during build dynamic = False # force grid reconstruction export = False # export mode def __init__(self, nc=80, anchors=(), ch=(), inplace=True): # detection layer super().__init__() self.nc = nc # number of classes self.no = nc + 5 # number of outputs per anchor self.nl = len(anchors) # number of detection layers self.na = len(anchors[0]) // 2 # number of anchors self.grid = [torch.empty(0) for _ in range(self.nl)] # init grid self.anchor_grid = [torch.empty(0) for _ in range(self.nl)] # init anchor grid self.register_buffer('anchors', torch.tensor(anchors).float().view(self.nl, -1, 2)) # shape(nl,na,2) self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv self.inplace = inplace # use inplace ops (e.g. slice assignment) def forward(self, x): z = [] # inference output for i in range(self.nl): x[i] = self.m[i](x[i]) # conv bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() if not self.training: # inference if self.dynamic or self.grid[i].shape[2:4] != x[i].shape[2:4]: self.grid[i], self.anchor_grid[i] = self._make_grid(nx, ny, i) if isinstance(self, Segment): # (boxes + masks) xy, wh, conf, mask = x[i].split((2, 2, self.nc + 1, self.no - self.nc - 5), 4) xy = (xy.sigmoid() * 2 + self.grid[i]) * self.stride[i] # xy wh = (wh.sigmoid() * 2) ** 2 * self.anchor_grid[i] # wh y = torch.cat((xy, wh, conf.sigmoid(), mask), 4) else: # Detect (boxes only) xy, wh, conf = x[i].sigmoid().split((2, 2, self.nc + 1), 4) xy = (xy * 2 + self.grid[i]) * self.stride[i] # xy wh = (wh * 2) ** 2 * self.anchor_grid[i] # wh y = torch.cat((xy, wh, conf), 4) z.append(y.view(bs, self.na * nx * ny, self.no)) return x if self.training else (torch.cat(z, 1),) if self.export else (torch.cat(z, 1), x) def _make_grid(self, nx=20, ny=20, i=0, torch_1_10=check_version(torch.__version__, '1.10.0')): d = self.anchors[i].device t = self.anchors[i].dtype shape = 1, self.na, ny, nx, 2 # grid shape y, x = torch.arange(ny, device=d, dtype=t), torch.arange(nx, device=d, dtype=t) yv, xv = torch.meshgrid(y, x, indexing='ij') if torch_1_10 else torch.meshgrid(y, x) # torch>=0.7 compatibility grid = torch.stack((xv, yv), 2).expand(shape) - 0.5 # add grid offset, i.e. 
y = 2.0 * x - 0.5 anchor_grid = (self.anchors[i] * self.stride[i]).view((1, self.na, 1, 1, 2)).expand(shape) return grid, anchor_grid class Segment(Detect): # YOLOv5 Segment head for segmentation models def __init__(self, nc=80, anchors=(), nm=32, npr=256, ch=(), inplace=True): super().__init__(nc, anchors, ch, inplace) self.nm = nm # number of masks self.npr = npr # number of protos self.no = 5 + nc + self.nm # number of outputs per anchor self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv self.proto = Proto(ch[0], self.npr, self.nm) # protos self.detect = Detect.forward def forward(self, x): p = self.proto(x[0]) x = self.detect(self, x) return (x, p) if self.training else (x[0], p) if self.export else (x[0], p, x[1]) class BaseModel(nn.Module): # YOLOv5 base model def forward(self, x, profile=False, visualize=False): return self._forward_once(x, profile, visualize) # single-scale inference, train def _forward_once(self, x, profile=False, visualize=False): y, dt = [], [] # outputs for m in self.model: if m.f != -1: # if not from previous layer x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers if profile: self._profile_one_layer(m, x, dt) x = m(x) # run y.append(x if m.i in self.save else None) # save output if visualize:
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ YOLO-specific modules Usage: $ python models/yolo.py --cfg yolov5s.yaml """ FILE = Path(__file__).resolve() ROOT = FILE.parents[1] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH if platform.system() != 'Windows': ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative try: except ImportError: thop = None class Detect(nn.Module): # YOLOv5 Detect head for detection models stride = None # strides computed during build dynamic = False # force grid reconstruction export = False # export mode def __init__(self, nc=80, anchors=(), ch=(), inplace=True): # detection layer super().__init__() self.nc = nc # number of classes self.no = nc + 5 # number of outputs per anchor self.nl = len(anchors) # number of detection layers self.na = len(anchors[0]) // 2 # number of anchors self.grid = [torch.empty(0) for _ in range(self.nl)] # init grid self.anchor_grid = [torch.empty(0) for _ in range(self.nl)] # init anchor grid self.register_buffer('anchors', torch.tensor(anchors).float().view(self.nl, -1, 2)) # shape(nl,na,2) self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv self.inplace = inplace # use inplace ops (e.g. slice assignment) def forward(self, x): z = [] # inference output for i in range(self.nl): x[i] = self.m[i](x[i]) # conv bs, _, ny, nx = x[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85) x[i] = x[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous() if not self.training: # inference if self.dynamic or self.grid[i].shape[2:4] != x[i].shape[2:4]: self.grid[i], self.anchor_grid[i] = self._make_grid(nx, ny, i) if isinstance(self, Segment): # (boxes + masks) xy, wh, conf, mask = x[i].split((2, 2, self.nc + 1, self.no - self.nc - 5), 4) xy = (xy.sigmoid() * 2 + self.grid[i]) * self.stride[i] # xy wh = (wh.sigmoid() * 2) ** 2 * self.anchor_grid[i] # wh y = torch.cat((xy, wh, conf.sigmoid(), mask), 4) else: # Detect (boxes only) xy, wh, conf = x[i].sigmoid().split((2, 2, self.nc + 1), 4) xy = (xy * 2 + self.grid[i]) * self.stride[i] # xy wh = (wh * 2) ** 2 * self.anchor_grid[i] # wh y = torch.cat((xy, wh, conf), 4) z.append(y.view(bs, self.na * nx * ny, self.no)) return x if self.training else (torch.cat(z, 1),) if self.export else (torch.cat(z, 1), x) def _make_grid(self, nx=20, ny=20, i=0, torch_1_10=check_version(torch.__version__, '1.10.0')): d = self.anchors[i].device t = self.anchors[i].dtype shape = 1, self.na, ny, nx, 2 # grid shape y, x = torch.arange(ny, device=d, dtype=t), torch.arange(nx, device=d, dtype=t) yv, xv = torch.meshgrid(y, x, indexing='ij') if torch_1_10 else torch.meshgrid(y, x) # torch>=0.7 compatibility grid = torch.stack((xv, yv), 2).expand(shape) - 0.5 # add grid offset, i.e. 
y = 2.0 * x - 0.5 anchor_grid = (self.anchors[i] * self.stride[i]).view((1, self.na, 1, 1, 2)).expand(shape) return grid, anchor_grid class Segment(Detect): # YOLOv5 Segment head for segmentation models def __init__(self, nc=80, anchors=(), nm=32, npr=256, ch=(), inplace=True): super().__init__(nc, anchors, ch, inplace) self.nm = nm # number of masks self.npr = npr # number of protos self.no = 5 + nc + self.nm # number of outputs per anchor self.m = nn.ModuleList(nn.Conv2d(x, self.no * self.na, 1) for x in ch) # output conv self.proto = Proto(ch[0], self.npr, self.nm) # protos self.detect = Detect.forward def forward(self, x): p = self.proto(x[0]) x = self.detect(self, x) return (x, p) if self.training else (x[0], p) if self.export else (x[0], p, x[1]) class BaseModel(nn.Module): # YOLOv5 base model def forward(self, x, profile=False, visualize=False): return self._forward_once(x, profile, visualize) # single-scale inference, train def _forward_once(self, x, profile=False, visualize=False): y, dt = [], [] # outputs for m in self.model: if m.f != -1: # if not from previous layer x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers if profile: self._profile_one_layer(m, x, dt) x = m(x) # run y.append(x if m.i in self.save else None) # save output if visualize:
feature_visualization(x, m.type, m.i, save_dir=visualize)
6
2023-12-10 14:18:29+00:00
8k
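The Detect head in this entry decodes raw sigmoid outputs into pixel-space boxes using a grid offset and per-anchor scaling. A minimal sketch of that decode for a single feature level; the tensor shapes in the comments are illustrative.

import torch

def decode_boxes(xy, wh, grid, anchor_grid, stride):
    # xy, wh: sigmoid-activated head outputs, e.g. shape (bs, na, ny, nx, 2)
    # grid: cell offsets minus 0.5 (see _make_grid above); anchor_grid: anchors scaled by stride.
    xy = (xy * 2 + grid) * stride        # box centre in pixels
    wh = (wh * 2) ** 2 * anchor_grid     # box width/height in pixels
    return torch.cat((xy, wh), dim=-1)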
mmathew23/improved_edm
train.py
[ { "identifier": "KarrasPipeline", "path": "pipeline.py", "snippet": "class KarrasPipeline(DiffusionPipeline):\n model_cpu_offload_seq = \"unet\"\n\n def __init__(self, unet, scheduler, method='euler'):\n super().__init__()\n\n # we ignore this, just having a scheduler for HF compatibility\n scheduler = DDIMScheduler.from_config(scheduler.config)\n\n self.register_modules(unet=unet, scheduler=scheduler)\n self.trained_image_size = unet.config.sample_size\n self.method = method\n # Adjust noise levels based on what's supported by the network.\n self.sigma_min = 0.002\n self.sigma_max = 80\n self.rho = 7\n\n def step(self, x, t, num_inference_steps=50):\n if self.method == 'euler':\n return self.step_euler(x, t, num_inference_steps=num_inference_steps)\n elif self.method == 'rk':\n return self.step_rk(x, t, num_inference_steps=num_inference_steps)\n else:\n raise NotImplementedError()\n\n @torch.no_grad()\n def __call__(\n self,\n batch_size: int = 1,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n eta: float = 0.0,\n num_inference_steps: int = 50,\n use_clipped_model_output: Optional[bool] = None,\n output_type: Optional[str] = \"pil\",\n return_dict: bool = True,\n S_churn: float = 0.0,\n S_min: float = 0.0,\n S_max: float = float(\"inf\"),\n S_noise: float = 1.0,\n to_device: Optional[torch.device] = None,\n second_order: bool = True,\n class_labels: Optional[torch.Tensor] = None,\n ) -> Union[ImagePipelineOutput, Tuple]:\n # Sample gaussian noise to begin loop\n if isinstance(self.unet.config.sample_size, int):\n image_shape = (\n batch_size,\n self.unet.config.in_channels,\n self.unet.config.sample_size,\n self.unet.config.sample_size,\n )\n else:\n image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)\n\n if isinstance(generator, list) and len(generator) != batch_size:\n raise ValueError(\n f\"You have passed a list of generators of length {len(generator)}, but requested an effective batch\"\n f\" size of {batch_size}. 
Make sure the batch size matches the length of the generators.\"\n )\n\n image = randn_tensor(image_shape, generator=generator, device=self._execution_device, dtype=self.unet.dtype)\n # image += 0.1 * torch.randn(\n # (image.shape[0], image.shape[1], 1, 1), device=image.device)\n\n # set step values\n self.scheduler.set_timesteps(num_inference_steps)\n # Time step discretization.\n step_indices = torch.arange(num_inference_steps, dtype=torch.float64, device=image.device)\n t_steps = (self.sigma_max ** (1 / self.rho) + step_indices / (num_inference_steps - 1) * (self.sigma_min ** (1 / self.rho) - self.sigma_max ** (1 / self.rho))) ** self.rho\n t_steps = torch.cat([torch.as_tensor(t_steps), torch.zeros_like(t_steps[:1])]).to(dtype=torch.float16)\n t_steps[-1] = 1e-6\n\n image = image * t_steps[0]\n for t in self.progress_bar(range(num_inference_steps)):\n t_cur = t_steps[t]\n t_next = t_steps[t + 1]\n gamma = min(S_churn / num_inference_steps, math.sqrt(2) - 1) if S_min <= t_cur <= S_max else 0\n t_hat = torch.as_tensor(t_cur + gamma * t_cur)\n x_hat = image + (t_hat ** 2 - t_cur ** 2).sqrt() * S_noise * torch.randn_like(image)\n\n denoised = self.unet(x_hat, t_hat, class_labels=class_labels).sample\n d_cur = (x_hat - denoised) / t_hat\n image = x_hat + (t_next - t_hat) * d_cur\n\n if second_order and t < num_inference_steps - 1:\n denoised = self.unet(image, t_next, class_labels=class_labels).sample\n d_prime = (image - denoised) / t_next\n image = x_hat + (t_next - t_hat) * (0.5 * d_cur + 0.5 * d_prime)\n\n image = (image / 2 + 0.5).clamp(0, 1)\n if output_type == \"pil\":\n image = image.cpu()\n image = self.numpy_to_pil(image.permute(0, 2, 3, 1).numpy())\n elif output_type == \"numpy\":\n image = image.cpu()\n image = image.permute(0, 2, 3, 1).numpy()\n else:\n if to_device is not None:\n image = image.to(to_device)\n\n if not return_dict:\n return (image,)\n\n return ImagePipelineOutput(images=image)" }, { "identifier": "UNet2DModel", "path": "model.py", "snippet": "class UNet2DModel(ModelMixin, ConfigMixin):\n r\"\"\"\n A 2D UNet model that takes a noisy sample and a timestep and returns a sample shaped output.\n\n This model inherits from [`ModelMixin`]. Check the superclass documentation for it's generic methods implemented\n for all models (such as downloading or saving).\n\n Parameters:\n sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`):\n Height and width of input/output sample. 
Dimensions must be a multiple of `2 ** (len(block_out_channels) -\n 1)`.\n in_channels (`int`, *optional*, defaults to 3): Number of channels in the input sample.\n out_channels (`int`, *optional*, defaults to 3): Number of channels in the output.\n center_input_sample (`bool`, *optional*, defaults to `False`): Whether to center the input sample.\n time_embedding_type (`str`, *optional*, defaults to `\"positional\"`): Type of time embedding to use.\n freq_shift (`int`, *optional*, defaults to 0): Frequency shift for Fourier time embedding.\n flip_sin_to_cos (`bool`, *optional*, defaults to `True`):\n Whether to flip sin to cos for Fourier time embedding.\n down_block_types (`Tuple[str]`, *optional*, defaults to `(\"DownBlock2D\", \"AttnDownBlock2D\", \"AttnDownBlock2D\", \"AttnDownBlock2D\")`):\n Tuple of downsample block types.\n mid_block_type (`str`, *optional*, defaults to `\"UNetMidBlock2D\"`):\n Block type for middle of UNet, it can be either `UNetMidBlock2D` or `UnCLIPUNetMidBlock2D`.\n up_block_types (`Tuple[str]`, *optional*, defaults to `(\"AttnUpBlock2D\", \"AttnUpBlock2D\", \"AttnUpBlock2D\", \"UpBlock2D\")`):\n Tuple of upsample block types.\n block_out_channels (`Tuple[int]`, *optional*, defaults to `(224, 448, 672, 896)`):\n Tuple of block output channels.\n layers_per_block (`int`, *optional*, defaults to `2`): The number of layers per block.\n mid_block_scale_factor (`float`, *optional*, defaults to `1`): The scale factor for the mid block.\n dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.\n attention_head_dim (`int`, *optional*, defaults to `8`): The attention head dimension.\n norm_eps (`float`, *optional*, defaults to `1e-5`): The epsilon for normalization.\n num_class_embeds (`int`, *optional*, defaults to `None`):\n Input dimension of the learnable embedding matrix to be projected to `time_embed_dim` when performing class\n conditioning with `class_embed_type` equal to `None`.\n \"\"\"\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[Union[int, Tuple[int, int]]] = None,\n in_channels: int = 3,\n out_channels: int = 3,\n center_input_sample: bool = False,\n down_block_types: Tuple[str] = (\"DownBlock2D\", \"AttnDownBlock2D\", \"AttnDownBlock2D\", \"AttnDownBlock2D\"),\n up_block_types: Tuple[str] = (\"AttnUpBlock2D\", \"AttnUpBlock2D\", \"AttnUpBlock2D\", \"UpBlock2D\"),\n block_out_channels: Tuple[int] = (224, 448, 672, 896),\n layers_per_block: int = 2,\n mid_block_scale_factor: float = 1,\n dropout: float = 0.0,\n attention_head_dim: Optional[int] = 8,\n norm_eps: float = 1e-4,\n add_attention: bool = True,\n num_class_embeds: Optional[int] = None,\n ):\n super().__init__()\n\n self.sample_size = sample_size\n time_embed_dim = block_out_channels[0] * 4\n\n # Check inputs\n if len(down_block_types) != len(up_block_types):\n raise ValueError(\n f\"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}.\"\n )\n\n if len(block_out_channels) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. 
`down_block_types`: {down_block_types}.\"\n )\n\n # input in_channels+1 due to concating of one's to mitigate removing bias\n self.conv_in = Conv2d(in_channels+1, block_out_channels[0], kernel_size=3, padding=(1, 1), bias=False)\n\n # time\n self.time_proj = GaussianFourierProjection(embedding_size=block_out_channels[0], scale=0.25)\n timestep_input_dim = block_out_channels[0]\n self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n\n # loss weighting \n self.loss_mlp = nn.Sequential(GaussianFourierProjection(embedding_size=block_out_channels[0], scale=0.25), Linear(timestep_input_dim, 1, bias=False))\n\n # class embedding\n if num_class_embeds is not None:\n self.class_embedding = ClassEmbedding(num_classes=num_class_embeds, embedding_size=time_embed_dim)\n else:\n self.class_embedding = None\n\n self.down_blocks = nn.ModuleList([])\n self.mid_block = None\n self.up_blocks = nn.ModuleList([])\n\n # down\n resnet_out_scale_factor = 1.0\n output_channel = block_out_channels[0]\n for i, down_block_type in enumerate(down_block_types):\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n down_block = get_down_block(\n down_block_type,\n num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_out_scale_factor=resnet_out_scale_factor,\n attention_head_dim=attention_head_dim if attention_head_dim is not None else output_channel,\n dropout=dropout,\n )\n self.down_blocks.append(down_block)\n\n # mid\n self.mid_block = nn.ModuleList()\n self.add_attention = add_attention\n if add_attention:\n self.mid_block.append(\n AttnDownBlock2D(\n in_channels=block_out_channels[-1],\n out_channels=block_out_channels[-1],\n temb_channels=time_embed_dim,\n dropout=dropout,\n num_layers=1,\n resnet_eps=norm_eps,\n output_scale_factor=resnet_out_scale_factor,\n attention_head_dim=attention_head_dim,\n add_downsample=False\n )\n )\n self.mid_block.append(\n DownBlock2D(\n in_channels=block_out_channels[-1],\n out_channels=block_out_channels[-1],\n temb_channels=time_embed_dim,\n dropout=dropout,\n num_layers=1,\n resnet_eps=norm_eps,\n output_scale_factor=resnet_out_scale_factor,\n add_downsample=False\n )\n )\n\n # up\n reversed_block_out_channels = list(reversed(block_out_channels))\n output_channel = reversed_block_out_channels[0]\n for i, up_block_type in enumerate(up_block_types):\n prev_output_channel = output_channel\n output_channel = reversed_block_out_channels[i]\n input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]\n\n is_final_block = i == len(block_out_channels) - 1\n up_block = get_up_block(\n up_block_type,\n num_layers=layers_per_block + 1,\n in_channels=input_channel,\n out_channels=output_channel,\n prev_output_channel=prev_output_channel,\n temb_channels=time_embed_dim,\n add_upsample=not is_final_block,\n resnet_eps=norm_eps,\n attention_head_dim=attention_head_dim if attention_head_dim is not None else output_channel,\n dropout=dropout,\n resnet_out_scale_factor=resnet_out_scale_factor,\n )\n self.up_blocks.append(up_block)\n prev_output_channel = output_channel\n\n # out\n self.conv_out = Conv2d(block_out_channels[0], out_channels, kernel_size=3, padding=1, bias=False)\n\n # init weights to normal since weight normalization\n recursive_normal_init(self)\n self.gain = nn.Parameter(torch.ones(1, 1, 1, 1))\n\n def 
get_loss_module_weight(self, timestep):\n return self.loss_mlp(timestep)\n\n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n class_labels: Optional[torch.Tensor] = None,\n return_dict: bool = True,\n return_loss_mlp: bool = False,\n ) -> Union[UNet2DOutput, Tuple]:\n r\"\"\"\n The [`UNet2DModel`] forward method.\n\n Args:\n sample (`torch.FloatTensor`):\n The noisy input tensor with the following shape `(batch, channel, height, width)`.\n timestep (`torch.FloatTensor` or `float` or `int`): The number of timesteps to denoise an input.\n class_labels (`torch.FloatTensor`, *optional*, defaults to `None`):\n Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings.\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`~models.unet_2d.UNet2DOutput`] instead of a plain tuple.\n\n Returns:\n [`~models.unet_2d.UNet2DOutput`] or `tuple`:\n If `return_dict` is True, an [`~models.unet_2d.UNet2DOutput`] is returned, otherwise a `tuple` is\n returned where the first element is the sample tensor.\n \"\"\"\n # 0. center input if necessary\n if self.config.center_input_sample:\n sample = 2 * sample - 1.0\n\n # 1. time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)\n elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n timesteps = timesteps * torch.ones(sample.shape[0], dtype=timesteps.dtype, device=timesteps.device)\n\n t_emb = self.time_proj(timesteps)\n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n emb = self.time_embedding(t_emb)\n\n if self.class_embedding is not None:\n if class_labels is None:\n raise ValueError(\"class_labels should be provided when doing class conditioning\")\n\n class_emb = self.class_embedding(class_labels, sample.device, self.dtype).to(dtype=self.dtype)\n emb = emb + class_emb\n elif self.class_embedding is None and class_labels is not None:\n raise ValueError(\"class_embedding needs to be initialized in order to use class conditioning\")\n\n # 2. pre-process\n skip_sample = sample\n\n # Create a tensor of ones with the same dtype and device\n b, c, h, w = sample.shape\n ones_tensor = torch.ones(b, 1, h, w, dtype=sample.dtype, device=sample.device)\n # Concatenate along the channel dimension\n c_in = 1 / torch.sqrt(0.25+timesteps**2)\n sample = torch.cat((sample*c_in[:, None, None, None], ones_tensor), dim=1)\n sample = self.conv_in(sample)\n\n # 3. down\n down_block_res_samples = (sample,)\n for downsample_block in self.down_blocks:\n if hasattr(downsample_block, \"skip_conv\"):\n sample, res_samples, skip_sample = downsample_block(\n hidden_states=sample, temb=emb, skip_sample=skip_sample\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb)\n\n down_block_res_samples += res_samples\n\n # 4. mid\n for i, block in enumerate(self.mid_block):\n if i == 0 and self.add_attention and isinstance(block, Attention):\n sample = block(sample)\n if isinstance(sample, tuple):\n sample = sample[0]\n else:\n sample = block(sample, emb)\n if isinstance(sample, tuple):\n sample = sample[0]\n # 5. 
up\n for upsample_block in self.up_blocks:\n res_samples = down_block_res_samples[-len(upsample_block.resnets) :]\n down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]\n if hasattr(upsample_block, \"skip_conv\"):\n sample, skip_sample = upsample_block(sample, res_samples, emb, skip_sample)\n else:\n sample = upsample_block(sample, res_samples, emb)\n\n # 6. post-process\n c_out = (timesteps*0.5) / torch.sqrt(timesteps**2 + 0.25)\n sample = self.conv_out(sample) * c_out[:, None, None, None]\n\n if skip_sample is not None:\n c_skip = 0.25 / (0.25+timesteps**2)\n sample += skip_sample * c_skip[:, None, None, None]\n\n if return_loss_mlp:\n loss_w = self.get_loss_module_weight(timesteps)\n if not return_dict:\n return (sample,), loss_w\n\n return UNet2DOutput(sample=sample), loss_w\n if not return_dict:\n return (sample,)\n\n return UNet2DOutput(sample=sample)" } ]
import torch import torch.nn.functional as F import hydra import os import shutil import math import numpy as np import re from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from diffusers.utils import make_image_grid from torchvision.transforms import Compose, ToTensor, Normalize, RandomHorizontalFlip from omegaconf import DictConfig from hydra.core.hydra_config import HydraConfig from diffusers.optimization import get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup from diffusers import EMAModel from pipeline import KarrasPipeline from accelerate import Accelerator, DistributedDataParallelKwargs from accelerate.utils import LoggerType from tqdm import tqdm from datasets import load_dataset from model import UNet2DModel
5,760
class Sampler(torch.utils.data.Sampler): def __init__(self, dataset_length, seed=31129347): self.dataset_length = dataset_length self.seed = seed def __iter__(self): rnd = np.random.RandomState(self.seed) order = np.arange(self.dataset_length) rnd.shuffle(order) window = int(np.rint(order.size * 0.5)) if window < 2: window = 3 idx = 0 while True: idx = idx % len(order) yield order[idx] j = (idx - rnd.randint(window)) % order.size order[idx], order[j] = order[j], order[idx] idx += 1 def get_total_steps(config): # round up, round since casting may round down due to fp precision total_steps = int(round(config.num_train_kimg * 1000 / (config.train_batch_size) + 0.5)) return total_steps def map_wrapper(func, from_key, to_key): def wrapper(example): example[to_key] = func(example[from_key]) return example return wrapper def get_inverse_sqrt_schedule(optimizer, num_warmup_steps, t_ref): def lr_lambda(current_step): if current_step < num_warmup_steps: return float(current_step) / float(max(1.0, num_warmup_steps)) return 1.0 / math.sqrt(max(1.0, (current_step - num_warmup_steps) / t_ref)) return LambdaLR(optimizer, lr_lambda) def evaluate(config, step, pipeline): if 'num_class_embeds' in config.unet: labels = torch.arange(config.unet.num_class_embeds, device='cuda:0')[:config.val_batch_size] if labels.shape[0] < config.val_batch_size: labels = labels.repeat(config.val_batch_size//labels.shape[0] + 1) labels = labels[:config.val_batch_size] else: labels = None for i in range(1): images = pipeline( batch_size=config.val_batch_size, class_labels=labels, generator=torch.manual_seed(config.seed+i), ).images cols = math.ceil(np.sqrt(len(images))) rows = math.ceil(len(images)/cols) image_grid = make_image_grid(images, rows=rows, cols=cols) test_dir = os.path.join(config.output_dir, "samples") os.makedirs(test_dir, exist_ok=True) image_grid.save(f"{test_dir}/{step:04d}_{i:03d}.png") def get_sigma(batch_size, P_mean, P_std, device): sigma = torch.randn([batch_size, 1, 1, 1], device=device) sigma = (sigma*P_std + P_mean).exp() return sigma def get_sigma_weight(sigma, sigma_data): w = (sigma**2 + sigma_data**2) / (sigma*sigma_data)**2 return w def add_noise(sample, noise, sigma): noise *= sigma return sample+noise def replace_grad_nans(model): # Iterate through all parameters for name, param in model.named_parameters(): if param.requires_grad and param.grad is not None: # Replace nan, inf, -inf in gradients with 0 torch.nan_to_num(param.grad, nan=0.0, posinf=0.0, neginf=0.0, out=param.grad) def train_loop(config, model, noise_scheduler, optimizer, train_dataloader, lr_scheduler): ddp_kwargs = DistributedDataParallelKwargs(find_unused_parameters=True) assert 'gradient_accumulation_steps' in config and config.gradient_accumulation_steps >= 1 accelerator = Accelerator( mixed_precision=config.mixed_precision, gradient_accumulation_steps=config.gradient_accumulation_steps, log_with=[LoggerType.TENSORBOARD, 'wandb'], project_dir=os.path.join(config.output_dir, "logs"), kwargs_handlers=[ddp_kwargs], split_batches=True ) is_distributed = accelerator.num_processes > 1 if config.use_ema:
class Sampler(torch.utils.data.Sampler): def __init__(self, dataset_length, seed=31129347): self.dataset_length = dataset_length self.seed = seed def __iter__(self): rnd = np.random.RandomState(self.seed) order = np.arange(self.dataset_length) rnd.shuffle(order) window = int(np.rint(order.size * 0.5)) if window < 2: window = 3 idx = 0 while True: idx = idx % len(order) yield order[idx] j = (idx - rnd.randint(window)) % order.size order[idx], order[j] = order[j], order[idx] idx += 1 def get_total_steps(config): # round up, round since casting may round down due to fp precision total_steps = int(round(config.num_train_kimg * 1000 / (config.train_batch_size) + 0.5)) return total_steps def map_wrapper(func, from_key, to_key): def wrapper(example): example[to_key] = func(example[from_key]) return example return wrapper def get_inverse_sqrt_schedule(optimizer, num_warmup_steps, t_ref): def lr_lambda(current_step): if current_step < num_warmup_steps: return float(current_step) / float(max(1.0, num_warmup_steps)) return 1.0 / math.sqrt(max(1.0, (current_step - num_warmup_steps) / t_ref)) return LambdaLR(optimizer, lr_lambda) def evaluate(config, step, pipeline): if 'num_class_embeds' in config.unet: labels = torch.arange(config.unet.num_class_embeds, device='cuda:0')[:config.val_batch_size] if labels.shape[0] < config.val_batch_size: labels = labels.repeat(config.val_batch_size//labels.shape[0] + 1) labels = labels[:config.val_batch_size] else: labels = None for i in range(1): images = pipeline( batch_size=config.val_batch_size, class_labels=labels, generator=torch.manual_seed(config.seed+i), ).images cols = math.ceil(np.sqrt(len(images))) rows = math.ceil(len(images)/cols) image_grid = make_image_grid(images, rows=rows, cols=cols) test_dir = os.path.join(config.output_dir, "samples") os.makedirs(test_dir, exist_ok=True) image_grid.save(f"{test_dir}/{step:04d}_{i:03d}.png") def get_sigma(batch_size, P_mean, P_std, device): sigma = torch.randn([batch_size, 1, 1, 1], device=device) sigma = (sigma*P_std + P_mean).exp() return sigma def get_sigma_weight(sigma, sigma_data): w = (sigma**2 + sigma_data**2) / (sigma*sigma_data)**2 return w def add_noise(sample, noise, sigma): noise *= sigma return sample+noise def replace_grad_nans(model): # Iterate through all parameters for name, param in model.named_parameters(): if param.requires_grad and param.grad is not None: # Replace nan, inf, -inf in gradients with 0 torch.nan_to_num(param.grad, nan=0.0, posinf=0.0, neginf=0.0, out=param.grad) def train_loop(config, model, noise_scheduler, optimizer, train_dataloader, lr_scheduler): ddp_kwargs = DistributedDataParallelKwargs(find_unused_parameters=True) assert 'gradient_accumulation_steps' in config and config.gradient_accumulation_steps >= 1 accelerator = Accelerator( mixed_precision=config.mixed_precision, gradient_accumulation_steps=config.gradient_accumulation_steps, log_with=[LoggerType.TENSORBOARD, 'wandb'], project_dir=os.path.join(config.output_dir, "logs"), kwargs_handlers=[ddp_kwargs], split_batches=True ) is_distributed = accelerator.num_processes > 1 if config.use_ema:
ema = EMAModel(model.parameters(), 0.999, model_cls=UNet2DModel, model_config=model.config)
1
2023-12-08 16:23:47+00:00
8k
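An illustrative side note, separate from the record above: the c_in, c_skip and c_out factors applied in UNet2DModel.forward (1/sqrt(0.25 + t^2), 0.25/(0.25 + t^2) and 0.5*t/sqrt(t^2 + 0.25)) are consistent with EDM-style preconditioning with sigma_data = 0.5. The sketch below just recomputes those coefficients for a batch of noise levels; the function name and test values are my own, not part of the repository.

import torch

def edm_precond_coeffs(sigma: torch.Tensor, sigma_data: float = 0.5):
    # c_in scales the noisy input, c_skip weights the skip connection,
    # c_out scales the network output (cf. the forward pass in the record above).
    s2 = sigma_data ** 2
    c_in = 1.0 / torch.sqrt(s2 + sigma ** 2)
    c_skip = s2 / (s2 + sigma ** 2)
    c_out = sigma * sigma_data / torch.sqrt(s2 + sigma ** 2)
    return c_in, c_skip, c_out

sigma = torch.tensor([0.1, 1.0, 10.0])
print(edm_precond_coeffs(sigma))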
youngskkim/CRN
models/base_bev_depth.py
[ { "identifier": "BaseLSSFPN", "path": "layers/backbones/base_lss_fpn.py", "snippet": "class BaseLSSFPN(nn.Module):\n def __init__(self, x_bound, y_bound, z_bound, d_bound, final_dim,\n downsample_factor, output_channels, img_backbone_conf,\n img_neck_conf, depth_net_conf, **kwargs):\n \"\"\"Modified from `https://github.com/nv-tlabs/lift-splat-shoot`.\n\n Args:\n x_bound (list): Boundaries for x.\n y_bound (list): Boundaries for y.\n z_bound (list): Boundaries for z.\n d_bound (list): Boundaries for d.\n final_dim (list): Dimension for input images.\n downsample_factor (int): Downsample factor between feature map\n and input image.\n output_channels (int): Number of channels for the output\n feature map.\n img_backbone_conf (dict): Config for image backbone.\n img_neck_conf (dict): Config for image neck.\n depth_net_conf (dict): Config for depth net.\n \"\"\"\n\n super(BaseLSSFPN, self).__init__()\n self.downsample_factor = downsample_factor\n self.d_bound = d_bound\n self.final_dim = final_dim\n self.output_channels = output_channels\n self.camera_aware = kwargs['camera_aware']\n\n self.register_buffer(\n 'voxel_size',\n torch.Tensor([row[2] for row in [x_bound, y_bound, z_bound]]))\n self.register_buffer(\n 'voxel_coord',\n torch.Tensor([\n row[0] + row[2] / 2.0 for row in [x_bound, y_bound, z_bound]\n ]))\n self.register_buffer(\n 'voxel_num',\n torch.LongTensor([(row[1] - row[0]) / row[2]\n for row in [x_bound, y_bound, z_bound]]))\n self.register_buffer('frustum', self.create_frustum())\n\n self.depth_channels, _, _, _ = self.frustum.shape\n\n self.img_backbone = build_backbone(img_backbone_conf)\n self.img_neck = build_neck(img_neck_conf)\n self.depth_net = self._configure_depth_net(depth_net_conf)\n\n self.img_neck.init_weights()\n self.img_backbone.init_weights()\n\n def _configure_depth_net(self, depth_net_conf):\n return DepthNet(\n depth_net_conf['in_channels'],\n depth_net_conf['mid_channels'],\n self.output_channels,\n self.depth_channels,\n camera_aware=self.camera_aware\n )\n\n def create_frustum(self):\n \"\"\"Generate frustum\"\"\"\n # make grid in image plane\n ogfH, ogfW = self.final_dim\n fH, fW = ogfH // self.downsample_factor, ogfW // self.downsample_factor\n d_coords = torch.arange(*self.d_bound,\n dtype=torch.float).view(-1, 1,\n 1).expand(-1, fH, fW)\n D, _, _ = d_coords.shape\n x_coords = torch.linspace(0, ogfW - 1, fW, dtype=torch.float).view(\n 1, 1, fW).expand(D, fH, fW)\n y_coords = torch.linspace(0, ogfH - 1, fH,\n dtype=torch.float).view(1, fH,\n 1).expand(D, fH, fW)\n paddings = torch.ones_like(d_coords)\n\n # D x H x W x 3\n frustum = torch.stack((x_coords, y_coords, d_coords, paddings), -1)\n return frustum\n\n def get_geometry(self, sensor2ego_mat, intrin_mat, ida_mat, bda_mat):\n \"\"\"Transfer points from camera coord to ego coord.\n\n Args:\n rots(Tensor): Rotation matrix from camera to ego.\n trans(Tensor): Translation matrix from camera to ego.\n intrins(Tensor): Intrinsic matrix.\n post_rots_ida(Tensor): Rotation matrix for ida.\n post_trans_ida(Tensor): Translation matrix for ida\n post_rot_bda(Tensor): Rotation matrix for bda.\n\n Returns:\n Tensors: points ego coord.\n \"\"\"\n batch_size, num_cams, _, _ = sensor2ego_mat.shape\n\n # undo post-transformation\n # B x N x D x H x W x 3\n points = self.frustum\n ida_mat = ida_mat.view(batch_size, num_cams, 1, 1, 1, 4, 4)\n points = ida_mat.inverse().matmul(points.unsqueeze(-1)).double()\n # cam_to_ego\n points = torch.cat(\n (points[:, :, :, :, :, :2] * points[:, :, :, :, :, 2:3],\n points[:, :, 
:, :, :, 2:]), 5)\n\n combine = sensor2ego_mat.matmul(torch.inverse(intrin_mat)).double()\n points = combine.view(batch_size, num_cams, 1, 1, 1, 4,\n 4).matmul(points).half()\n if bda_mat is not None:\n bda_mat = bda_mat.unsqueeze(1).repeat(1, num_cams, 1, 1).view(\n batch_size, num_cams, 1, 1, 1, 4, 4)\n points = (bda_mat @ points).squeeze(-1)\n else:\n points = points.squeeze(-1)\n return points[..., :3]\n\n def get_cam_feats(self, imgs):\n \"\"\"Get feature maps from images.\"\"\"\n batch_size, num_sweeps, num_cams, num_channels, imH, imW = imgs.shape\n\n imgs = imgs.flatten().view(batch_size * num_sweeps * num_cams,\n num_channels, imH, imW)\n img_feats = self.img_neck(self.img_backbone(imgs))[0]\n img_feats = img_feats.reshape(batch_size, num_sweeps, num_cams,\n img_feats.shape[1], img_feats.shape[2],\n img_feats.shape[3])\n return img_feats\n\n def _forward_depth_net(self, feat, mats_dict):\n return self.depth_net(feat, mats_dict)\n\n def _forward_voxel_net(self, img_feat_with_depth):\n return img_feat_with_depth\n\n def _forward_single_sweep(self,\n sweep_index,\n sweep_imgs,\n mats_dict,\n is_return_depth=False):\n \"\"\"Forward function for single sweep.\n\n Args:\n sweep_index (int): Index of sweeps.\n sweep_imgs (Tensor): Input images.\n mats_dict (dict):\n sensor2ego_mats(Tensor): Transformation matrix from\n camera to ego with shape of (B, num_sweeps,\n num_cameras, 4, 4).\n intrin_mats(Tensor): Intrinsic matrix with shape\n of (B, num_sweeps, num_cameras, 4, 4).\n ida_mats(Tensor): Transformation matrix for ida with\n shape of (B, num_sweeps, num_cameras, 4, 4).\n sensor2sensor_mats(Tensor): Transformation matrix\n from key frame camera to sweep frame camera with\n shape of (B, num_sweeps, num_cameras, 4, 4).\n bda_mat(Tensor): Rotation matrix for bda with shape\n of (B, 4, 4).\n is_return_depth (bool, optional): Whether to return depth.\n Default: False.\n\n Returns:\n Tensor: BEV feature map.\n \"\"\"\n if self.times is not None:\n t1 = torch.cuda.Event(enable_timing=True)\n t2 = torch.cuda.Event(enable_timing=True)\n t3 = torch.cuda.Event(enable_timing=True)\n t4 = torch.cuda.Event(enable_timing=True)\n t5 = torch.cuda.Event(enable_timing=True)\n t1.record()\n torch.cuda.synchronize()\n\n batch_size, num_sweeps, num_cams, num_channels, img_height, \\\n img_width = sweep_imgs.shape\n\n # extract image feature\n img_feats = self.get_cam_feats(sweep_imgs)\n if self.times is not None:\n t2.record()\n torch.cuda.synchronize()\n self.times['img_backbone'].append(t1.elapsed_time(t2))\n\n source_features = img_feats[:, 0, ...]\n depth_feature = self._forward_depth_net(\n source_features.reshape(batch_size * num_cams,\n source_features.shape[2],\n source_features.shape[3],\n source_features.shape[4]),\n mats_dict,\n )\n depth = depth_feature[:, :self.depth_channels].softmax(1)\n if self.times is not None:\n t3.record()\n torch.cuda.synchronize()\n self.times['img_dep'].append(t2.elapsed_time(t3))\n\n img_feat_with_depth = depth.unsqueeze(\n 1) * depth_feature[:, self.depth_channels:(\n self.depth_channels + self.output_channels)].unsqueeze(2)\n\n img_feat_with_depth = self._forward_voxel_net(img_feat_with_depth)\n\n img_feat_with_depth = img_feat_with_depth.reshape(\n batch_size,\n num_cams,\n img_feat_with_depth.shape[1],\n img_feat_with_depth.shape[2],\n img_feat_with_depth.shape[3],\n img_feat_with_depth.shape[4],\n )\n geom_xyz = self.get_geometry(\n mats_dict['sensor2ego_mats'][:, sweep_index, ...],\n mats_dict['intrin_mats'][:, sweep_index, ...],\n mats_dict['ida_mats'][:, 
sweep_index, ...],\n mats_dict.get('bda_mat', None),\n )\n if self.times is not None:\n t4.record()\n torch.cuda.synchronize()\n self.times['img_transform'].append(t3.elapsed_time(t4))\n\n img_feat_with_depth = img_feat_with_depth.permute(0, 1, 3, 4, 5, 2)\n geom_xyz = ((geom_xyz - (self.voxel_coord - self.voxel_size / 2.0)) /\n self.voxel_size).int()\n feature_map = voxel_pooling(geom_xyz, img_feat_with_depth.contiguous(),\n self.voxel_num.cuda())\n if self.times is not None:\n t5.record()\n torch.cuda.synchronize()\n self.times['img_pool'].append(t4.elapsed_time(t5))\n\n if is_return_depth:\n return feature_map.contiguous(), depth\n return feature_map.contiguous()\n\n def forward(self,\n sweep_imgs,\n mats_dict,\n times=None,\n epoch=None,\n timestamps=None,\n is_return_depth=False):\n \"\"\"Forward function.\n\n Args:\n sweep_imgs(Tensor): Input images with shape of (B, num_sweeps,\n num_cameras, 3, H, W).\n mats_dict(dict):\n sensor2ego_mats(Tensor): Transformation matrix from\n camera to ego with shape of (B, num_sweeps,\n num_cameras, 4, 4).\n intrin_mats(Tensor): Intrinsic matrix with shape\n of (B, num_sweeps, num_cameras, 4, 4).\n ida_mats(Tensor): Transformation matrix for ida with\n shape of (B, num_sweeps, num_cameras, 4, 4).\n sensor2sensor_mats(Tensor): Transformation matrix\n from key frame camera to sweep frame camera with\n shape of (B, num_sweeps, num_cameras, 4, 4).\n bda_mat(Tensor): Rotation matrix for bda with shape\n of (B, 4, 4).\n timestamps(Tensor): Timestamp for all images with the shape of(B,\n num_sweeps, num_cameras).\n\n Return:\n Tensor: bev feature map.\n \"\"\"\n self.times = times\n if self.times is not None:\n t1 = torch.cuda.Event(enable_timing=True)\n t2 = torch.cuda.Event(enable_timing=True)\n t1.record()\n torch.cuda.synchronize()\n\n batch_size, num_sweeps, num_cams, num_channels, img_height, \\\n img_width = sweep_imgs.shape\n key_frame_res = self._forward_single_sweep(\n 0,\n sweep_imgs[:, 0:1, ...],\n mats_dict,\n is_return_depth=is_return_depth)\n if self.times is not None:\n t2.record()\n torch.cuda.synchronize()\n self.times['img'].append(t1.elapsed_time(t2))\n\n if num_sweeps == 1:\n if is_return_depth:\n return key_frame_res[0], key_frame_res[1], self.times\n else:\n return key_frame_res, self.times\n\n key_frame_feature = key_frame_res[0] if is_return_depth else key_frame_res\n ret_feature_list = [key_frame_feature]\n for sweep_index in range(1, num_sweeps):\n with torch.no_grad():\n feature_map = self._forward_single_sweep(\n sweep_index,\n sweep_imgs[:, sweep_index:sweep_index + 1, ...],\n mats_dict,\n is_return_depth=False)\n ret_feature_list.append(feature_map)\n\n if is_return_depth:\n return torch.cat(ret_feature_list, 1), key_frame_res[1], self.times\n else:\n return torch.cat(ret_feature_list, 1), self.times" }, { "identifier": "BEVDepthHead", "path": "layers/heads/bev_depth_head_det.py", "snippet": "class BEVDepthHead(CenterHead):\n \"\"\"Head for BevDepth.\n\n Args:\n in_channels(int): Number of channels after bev_neck.\n tasks(dict): Tasks for head.\n bbox_coder(dict): Config of bbox coder.\n common_heads(dict): Config of head for each task.\n loss_cls(dict): Config of classification loss.\n loss_bbox(dict): Config of regression loss.\n gaussian_overlap(float): Gaussian overlap used for `get_targets`.\n min_radius(int): Min radius used for `get_targets`.\n train_cfg(dict): Config used in the training process.\n test_cfg(dict): Config used in the test process.\n bev_backbone_conf(dict): Cnfig of bev_backbone.\n 
bev_neck_conf(dict): Cnfig of bev_neck.\n \"\"\"\n def __init__(\n self,\n in_channels=256,\n tasks=None,\n bbox_coder=None,\n common_heads=dict(),\n loss_cls=dict(type='GaussianFocalLoss', reduction='mean'),\n loss_bbox=dict(type='L1Loss', reduction='mean', loss_weight=0.25),\n gaussian_overlap=0.1,\n min_radius=2,\n train_cfg=None,\n test_cfg=None,\n bev_backbone_conf=bev_backbone_conf,\n bev_neck_conf=bev_neck_conf,\n separate_head=dict(type='SeparateHead',\n init_bias=-2.19,\n final_kernel=3),\n ):\n super(BEVDepthHead, self).__init__(\n in_channels=in_channels,\n tasks=tasks,\n bbox_coder=bbox_coder,\n common_heads=common_heads,\n loss_cls=loss_cls,\n loss_bbox=loss_bbox,\n separate_head=separate_head,\n )\n self.trunk = build_backbone(bev_backbone_conf)\n self.trunk.init_weights()\n self.neck = build_neck(bev_neck_conf)\n self.neck.init_weights()\n del self.trunk.maxpool\n self.gaussian_overlap = gaussian_overlap\n self.min_radius = min_radius\n self.train_cfg = train_cfg\n self.test_cfg = test_cfg\n\n @autocast(False)\n def forward(self, x, times=None):\n \"\"\"Forward pass.\n\n Args:\n x (list[torch.Tensor]): Multi-level features, e.g.,\n features produced by FPN.\n\n Returns:\n tuple(list[dict]): Output results for tasks.\n \"\"\"\n if times is not None:\n t1 = torch.cuda.Event(enable_timing=True)\n t2 = torch.cuda.Event(enable_timing=True)\n t3 = torch.cuda.Event(enable_timing=True)\n t1.record()\n torch.cuda.synchronize()\n\n # FPN\n trunk_outs = [x]\n if self.trunk.deep_stem:\n x = self.trunk.stem(x)\n else:\n x = self.trunk.conv1(x)\n x = self.trunk.norm1(x)\n x = self.trunk.relu(x)\n for i, layer_name in enumerate(self.trunk.res_layers):\n res_layer = getattr(self.trunk, layer_name)\n x = res_layer(x)\n if i in self.trunk.out_indices:\n trunk_outs.append(x)\n fpn_output = self.neck(trunk_outs)\n\n if times is not None:\n t2.record()\n torch.cuda.synchronize()\n times['head_backbone'].append(t1.elapsed_time(t2))\n\n ret_values = super().forward(fpn_output)\n\n if times is not None:\n t3.record()\n torch.cuda.synchronize()\n times['head_head'].append(t2.elapsed_time(t3))\n times['head'].append(t1.elapsed_time(t3))\n\n return ret_values, times\n\n def get_targets_single(self, gt_bboxes_3d, gt_labels_3d):\n \"\"\"Generate training targets for a single sample.\n\n Args:\n gt_bboxes_3d (:obj:`LiDARInstance3DBoxes`): Ground truth gt boxes.\n gt_labels_3d (torch.Tensor): Labels of boxes.\n\n Returns:\n tuple[list[torch.Tensor]]: Tuple of target including \\\n the following results in order.\n\n - list[torch.Tensor]: Heatmap scores.\n - list[torch.Tensor]: Ground truth boxes.\n - list[torch.Tensor]: Indexes indicating the position \\\n of the valid boxes.\n - list[torch.Tensor]: Masks indicating which boxes \\\n are valid.\n \"\"\"\n max_objs = self.train_cfg['max_objs'] * self.train_cfg['dense_reg']\n grid_size = torch.tensor(self.train_cfg['grid_size'])\n pc_range = torch.tensor(self.train_cfg['point_cloud_range'])\n voxel_size = torch.tensor(self.train_cfg['voxel_size'])\n\n feature_map_size = grid_size[:2] // self.train_cfg['out_size_factor']\n\n # reorganize the gt_dict by tasks\n task_masks = []\n flag = 0\n for class_name in self.class_names:\n task_masks.append([\n torch.where(gt_labels_3d == class_name.index(i) + flag)\n for i in class_name\n ])\n flag += len(class_name)\n\n task_boxes = []\n task_classes = []\n flag2 = 0\n for idx, mask in enumerate(task_masks):\n task_box = []\n task_class = []\n for m in mask:\n task_box.append(gt_bboxes_3d[m])\n # 0 is background for each 
task, so we need to add 1 here.\n task_class.append(gt_labels_3d[m] + 1 - flag2)\n task_boxes.append(\n torch.cat(task_box, axis=0).to(gt_bboxes_3d.device))\n task_classes.append(\n torch.cat(task_class).long().to(gt_bboxes_3d.device))\n flag2 += len(mask)\n draw_gaussian = draw_heatmap_gaussian\n heatmaps, anno_boxes, inds, masks = [], [], [], []\n\n for idx, task_head in enumerate(self.task_heads):\n heatmap = gt_bboxes_3d.new_zeros(\n (len(self.class_names[idx]), feature_map_size[1],\n feature_map_size[0]),\n device='cuda')\n\n anno_box = gt_bboxes_3d.new_zeros((max_objs, 10),\n dtype=torch.float32,\n device='cuda')\n\n ind = gt_labels_3d.new_zeros((max_objs),\n dtype=torch.int64,\n device='cuda')\n mask = gt_bboxes_3d.new_zeros((max_objs),\n dtype=torch.uint8,\n device='cuda')\n\n num_objs = min(task_boxes[idx].shape[0], max_objs)\n\n for k in range(num_objs):\n cls_id = task_classes[idx][k] - 1\n\n width = task_boxes[idx][k][3]\n length = task_boxes[idx][k][4]\n width = width / voxel_size[0] / self.train_cfg[\n 'out_size_factor']\n length = length / voxel_size[1] / self.train_cfg[\n 'out_size_factor']\n\n if width > 0 and length > 0:\n radius = gaussian_radius(\n (length, width),\n min_overlap=self.train_cfg['gaussian_overlap'])\n radius = max(self.train_cfg['min_radius'], int(radius))\n\n # be really careful for the coordinate system of\n # your box annotation.\n x, y, z = task_boxes[idx][k][0], task_boxes[idx][k][\n 1], task_boxes[idx][k][2]\n\n coor_x = (\n x - pc_range[0]\n ) / voxel_size[0] / self.train_cfg['out_size_factor']\n coor_y = (\n y - pc_range[1]\n ) / voxel_size[1] / self.train_cfg['out_size_factor']\n\n center = torch.tensor([coor_x, coor_y],\n dtype=torch.float32,\n device='cuda')\n center_int = center.to(torch.int32)\n\n # throw out not in range objects to avoid out of array\n # area when creating the heatmap\n if not (0 <= center_int[0] < feature_map_size[0]\n and 0 <= center_int[1] < feature_map_size[1]):\n continue\n\n draw_gaussian(heatmap[cls_id], center_int, radius)\n\n new_idx = k\n x, y = center_int[0], center_int[1]\n\n assert y * feature_map_size[0] + x < feature_map_size[\n 0] * feature_map_size[1]\n\n ind[new_idx] = y * feature_map_size[0] + x\n mask[new_idx] = 1\n\n vx, vy = task_boxes[idx][k][7:]\n rot = task_boxes[idx][k][6]\n box_dim = task_boxes[idx][k][3:6]\n if self.norm_bbox:\n box_dim = box_dim.log()\n anno_box[new_idx] = torch.cat([\n center - torch.tensor([x, y], device='cuda'),\n z.unsqueeze(0),\n box_dim,\n torch.sin(rot).unsqueeze(0),\n torch.cos(rot).unsqueeze(0),\n vx.unsqueeze(0),\n vy.unsqueeze(0),\n ])\n\n heatmaps.append(heatmap)\n anno_boxes.append(anno_box)\n masks.append(mask)\n inds.append(ind)\n return heatmaps, anno_boxes, inds, masks\n\n def loss(self, targets, preds_dicts, **kwargs):\n \"\"\"Loss function for BEVDepthHead.\n\n Args:\n gt_bboxes_3d (list[:obj:`LiDARInstance3DBoxes`]): Ground\n truth gt boxes.\n gt_labels_3d (list[torch.Tensor]): Labels of boxes.\n preds_dicts (dict): Output of forward function.\n\n Returns:\n dict[str:torch.Tensor]: Loss of heatmap and bbox of each task.\n \"\"\"\n heatmaps, anno_boxes, inds, masks = targets\n return_loss = 0\n return_loss_heatmap, return_loss_bbox = 0, 0\n for task_id, preds_dict in enumerate(preds_dicts):\n # heatmap focal loss\n preds_dict[0]['heatmap'] = clip_sigmoid(preds_dict[0]['heatmap'])\n num_pos = heatmaps[task_id].eq(1).float().sum().item()\n cls_avg_factor = torch.clamp(reduce_mean(\n heatmaps[task_id].new_tensor(num_pos)),\n min=1).item()\n loss_heatmap = 
self.loss_cls(preds_dict[0]['heatmap'],\n heatmaps[task_id],\n avg_factor=cls_avg_factor)\n target_box = anno_boxes[task_id]\n # reconstruct the anno_box from multiple reg heads\n preds_dict[0]['anno_box'] = torch.cat(\n (\n preds_dict[0]['reg'],\n preds_dict[0]['height'],\n preds_dict[0]['dim'],\n preds_dict[0]['rot'],\n preds_dict[0]['vel'],\n ),\n dim=1,\n )\n\n # Regression loss for dimension, offset, height, rotation\n num = masks[task_id].float().sum()\n ind = inds[task_id]\n pred = preds_dict[0]['anno_box'].permute(0, 2, 3, 1).contiguous()\n pred = pred.view(pred.size(0), -1, pred.size(3))\n pred = self._gather_feat(pred, ind)\n mask = masks[task_id].unsqueeze(2).expand_as(target_box).float()\n num = torch.clamp(reduce_mean(target_box.new_tensor(num)),\n min=1e-4).item()\n isnotnan = (~torch.isnan(target_box)).float()\n mask *= isnotnan\n code_weights = self.train_cfg['code_weights']\n bbox_weights = mask * mask.new_tensor(code_weights)\n loss_bbox = self.loss_bbox(pred,\n target_box,\n bbox_weights,\n avg_factor=num)\n return_loss += loss_bbox\n return_loss += loss_heatmap\n return_loss_bbox += loss_bbox\n return_loss_heatmap += loss_heatmap\n return return_loss, return_loss_heatmap, return_loss_bbox" } ]
import mmcv import torch from torch import nn from layers.backbones.base_lss_fpn import BaseLSSFPN from layers.heads.bev_depth_head_det import BEVDepthHead
6,358
logger = mmcv.utils.get_logger('mmdet') logger.setLevel('WARNING') __all__ = ['BaseBEVDepth'] class BaseBEVDepth(nn.Module): """Source code of `BEVDepth`, `https://arxiv.org/abs/2112.11790`. Args: backbone_conf (dict): Config of backbone. head_conf (dict): Config of head. """ def __init__(self, backbone_conf, head_conf): super(BaseBEVDepth, self).__init__() self.backbone_img = BaseLSSFPN(**backbone_conf)
logger = mmcv.utils.get_logger('mmdet') logger.setLevel('WARNING') __all__ = ['BaseBEVDepth'] class BaseBEVDepth(nn.Module): """Source code of `BEVDepth`, `https://arxiv.org/abs/2112.11790`. Args: backbone_conf (dict): Config of backbone. head_conf (dict): Config of head. """ def __init__(self, backbone_conf, head_conf): super(BaseBEVDepth, self).__init__() self.backbone_img = BaseLSSFPN(**backbone_conf)
self.head = BEVDepthHead(**head_conf)
1
2023-12-06 14:57:49+00:00
8k
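An illustrative side note, separate from the record above: BaseLSSFPN.create_frustum builds a (D, fH, fW, 4) grid of (x, y, depth, 1) points on the downsampled image plane, which get_geometry then lifts into ego coordinates. A minimal standalone sketch of that grid construction follows; the resolution, downsample factor and depth bounds are example assumptions, not the repository's configs.

import torch

def make_frustum(final_dim=(256, 704), downsample=16, d_bound=(2.0, 58.0, 0.5)):
    ogfH, ogfW = final_dim
    fH, fW = ogfH // downsample, ogfW // downsample
    # depth planes, replicated over the feature-map grid
    d = torch.arange(*d_bound, dtype=torch.float).view(-1, 1, 1).expand(-1, fH, fW)
    D = d.shape[0]
    # pixel coordinates in the original image, sampled at feature-map resolution
    x = torch.linspace(0, ogfW - 1, fW).view(1, 1, fW).expand(D, fH, fW)
    y = torch.linspace(0, ogfH - 1, fH).view(1, fH, 1).expand(D, fH, fW)
    ones = torch.ones_like(d)  # homogeneous padding
    return torch.stack((x, y, d, ones), dim=-1)  # D x fH x fW x 4

print(make_frustum().shape)  # torch.Size([112, 16, 44, 4])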
felixcheng97/AGAP
run.py
[ { "identifier": "utils", "path": "lib/utils.py", "snippet": "def create_optimizer_or_freeze_model(model, cfg_train, global_step):\ndef load_checkpoint(model, optimizer, ckpt_path, no_reload_optimizer):\ndef load_model(model_class, ckpt_path):\ndef rgb_ssim(img0, img1, max_val,\n filter_size=11,\n filter_sigma=1.5,\n k1=0.01,\n k2=0.03,\n return_map=False):\n def convolve2d(z, f):" }, { "identifier": "dvgo", "path": "lib/dvgo.py", "snippet": "class DirectVoxGO(torch.nn.Module):\nclass Raw2Alpha(torch.autograd.Function):\nclass Raw2Alpha_nonuni(torch.autograd.Function):\nclass Alphas2Weights(torch.autograd.Function):\n def __init__(self, xyz_min, xyz_max,\n num_voxels=0, num_voxels_base=0,\n alpha_init=None,\n mask_cache_path=None, mask_cache_thres=1e-3, mask_cache_world_size=None,\n fast_color_thres=0,\n density_type='DenseGrid', k0_type='DenseGrid',\n density_config={}, k0_config={},\n rgbnet_dim=0,\n rgbnet_depth=3, rgbnet_width=128,\n equ_size=(768,1536),\n xyz_config={},\n viewdirs_config={},\n deformation_config={},\n **kwargs):\n def get_k0_grid_rgb(self):\n def _set_equ_resolution(self, equ_size):\n def _set_grid_resolution(self, num_voxels):\n def get_kwargs(self):\n def maskout_near_cam_vox(self, cam_o, near_clip):\n def scale_equ_grid(self, equ_size, upsample):\n def scale_volume_grid(self, num_voxels):\n def update_occupancy_cache(self):\n def voxel_count_views(self, rays_o_tr, rays_d_tr, imsz, near, far, stepsize, downrate=1, irregular_shape=False):\n def density_total_variation_add_grad(self, weight, dense_mode):\n def k0_total_variation_add_grad(self, weight, dense_mode):\n def activate_density(self, density, interval=None):\n def hit_coarse_geo(self, rays_o, rays_d, near, far, stepsize, **render_kwargs):\n def sample_ray(self, rays_o, rays_d, near, far, stepsize, **render_kwargs):\n def forward(self, rays_o, rays_d, viewdirs, global_step=None, **render_kwargs):\n def forward(ctx, density, shift, interval):\n def backward(ctx, grad_back):\n def forward(ctx, density, shift, interval):\n def backward(ctx, grad_back):\n def forward(ctx, alpha, ray_id, N):\n def backward(ctx, grad_weights, grad_last):\ndef get_rays(H, W, K, c2w, inverse_y, flip_x, flip_y, mode='center'):\ndef get_rays_np(H, W, K, c2w):\ndef ndc_rays(H, W, focal, near, rays_o, rays_d):\ndef get_ray_of_a_panorama(H, W, c2w):\ndef get_rays_of_a_view(H, W, K, c2w, ndc, inverse_y, flip_x, flip_y, mode='center'):\ndef get_training_rays_panorama(rgb_tr, train_poses, HW):\ndef get_training_rays(rgb_tr, train_poses, HW, Ks, ndc, inverse_y, flip_x, flip_y):\ndef get_training_rays_flatten(rgb_tr_ori, train_poses, HW, Ks, ndc, inverse_y, flip_x, flip_y):\ndef get_training_rays_in_maskcache_sampling(rgb_tr_ori, train_poses, HW, Ks, ndc, inverse_y, flip_x, flip_y, model, render_kwargs):\ndef batch_indices_generator(N, BS):\n N = len(rays_o)\n H, W = HW[0]\n H, W = HW[0]\n K = Ks[0]\n DEVICE = rgb_tr_ori[0].device\n N = sum(im.shape[0] * im.shape[1] for im in rgb_tr_ori)\n CHUNK = 64\n DEVICE = rgb_tr_ori[0].device\n N = sum(im.shape[0] * im.shape[1] for im in rgb_tr_ori)" }, { "identifier": "dmpigo", "path": "lib/dmpigo.py", "snippet": "class DirectMPIGO(torch.nn.Module):\n def __init__(self, xyz_min, xyz_max,\n num_voxels=0, mpi_depth=0,\n mask_cache_path=None, mask_cache_thres=1e-3, mask_cache_world_size=None,\n fast_color_thres=0,\n density_type='DenseGrid', k0_type='DenseGrid',\n density_config={}, k0_config={},\n rgbnet_dim=9,\n rgbnet_depth=3, rgbnet_width=128,\n image_size=(768,1024),\n xyz_config={},\n 
viewdirs_config={},\n deformation_config={},\n **kwargs):\n def get_k0_grid_rgb(self):\n def _set_image_resolution(self, image_size):\n def _set_grid_resolution(self, num_voxels, mpi_depth):\n def get_kwargs(self):\n def scale_image_grid(self, image_size, upsample=False):\n def scale_volume_grid(self, num_voxels, mpi_depth):\n def update_occupancy_cache(self):\n def update_occupancy_cache_lt_nviews(self, rays_o_tr, rays_d_tr, imsz, render_kwargs, maskout_lt_nviews):\n def density_total_variation_add_grad(self, weight, dense_mode):\n def k0_total_variation_add_grad(self, weight, dense_mode):\n def activate_density(self, density, interval=None):\n def sample_ray(self, rays_o, rays_d, near, far, stepsize, **render_kwargs):\n def forward(self, rays_o, rays_d, viewdirs, global_step=None, **render_kwargs):\ndef create_full_step_id(shape):\n N = len(rays_o)" }, { "identifier": "dpvgo", "path": "lib/dpvgo.py", "snippet": "class DirectPanoramaVoxGO(nn.Module):\nclass DistortionLoss(torch.autograd.Function):\n def __init__(self, xyz_min, xyz_max,\n num_voxels=0, num_voxels_base=0,\n alpha_init=None,\n mask_cache_world_size=None,\n fast_color_thres=0,\n contracted_norm='l2',\n density_type='DenseGrid', k0_type='DenseGrid',\n density_config={}, k0_config={},\n rgbnet_dim=0,\n rgbnet_depth=3, rgbnet_width=128,\n equ_size=(768,1536),\n xyz_config={},\n viewdirs_config={},\n deformation_config={},\n **kwargs):\n def get_k0_grid_rgb(self):\n def _set_equ_resolution(self, equ_size):\n def _set_grid_resolution(self, num_voxels):\n def get_kwargs(self):\n def scale_equ_grid(self, equ_size, upsample):\n def scale_volume_grid(self, num_voxels):\n def update_occupancy_cache(self):\n def update_occupancy_cache_lt_nviews(self, rays_o_tr, rays_d_tr, imsz, render_kwargs, maskout_lt_nviews):\n def density_total_variation_add_grad(self, weight, dense_mode):\n def k0_total_variation_add_grad(self, weight, dense_mode):\n def activate_density(self, density, interval=None):\n def sample_ray(self, ori_rays_o, ori_rays_d, stepsize, is_train=False, **render_kwargs):\n def forward(self, rays_o, rays_d, viewdirs, global_step=None, is_train=False, **render_kwargs):\n def forward(ctx, w, s, n_max, ray_id):\n def backward(ctx, grad_back):\n N = int(self.world_len / stepsize)\n N = len(rays_o)" }, { "identifier": "load_data", "path": "lib/load_data.py", "snippet": "def load_data(args):\n\n K, depths = None, None\n K_render = None\n near_clip = None\n\n if args.dataset_type == 'replica':\n images, poses, hwf, render_poses, i_split, K = load_replica_data(basedir=args.datadir, movie_render_kwargs=args.movie_render_kwargs)\n print('Loaded replica', images.shape, poses.shape, hwf, args.datadir)\n i_train, i_val, i_test = i_split\n \n near = 0.\n far = 1.0\n print('NEAR FAR', near, far)\n \n elif args.dataset_type == 'llff':\n images, depths, poses, bds, render_poses, i_test = load_llff_data(\n args.datadir, args.factor, args.width, args.height,\n recenter=True, bd_factor=args.bd_factor,\n move_back=args.move_back,\n spherify=args.spherify,\n load_depths=args.load_depths,\n movie_render_kwargs=args.movie_render_kwargs)\n hwf = poses[0,:3,-1]\n poses = poses[:,:3,:4]\n print('Loaded llff', images.shape, render_poses.shape, hwf, args.datadir)\n if not isinstance(i_test, list):\n i_test = [i_test]\n\n if args.llffhold > 0:\n print('Auto LLFF holdout,', args.llffhold)\n i_test = np.arange(images.shape[0])[::args.llffhold]\n\n i_val = i_test\n i_train = np.array([i for i in np.arange(int(images.shape[0])) if\n (i not in i_test and i not in 
i_val)])\n\n print('DEFINING BOUNDS')\n if args.ndc:\n near = 0.\n far = 1.\n else:\n raise NotImplementedError\n print('NEAR FAR', near, far)\n\n else:\n raise NotImplementedError(f'Unknown dataset type {args.dataset_type} exiting')\n\n # Cast intrinsics to right types\n H, W, focal = hwf\n H, W = int(H), int(W)\n hwf = [H, W, focal]\n HW = np.array([im.shape[:2] for im in images])\n irregular_shape = (images.dtype is np.dtype('object'))\n\n if K is None:\n K = np.array([\n [focal, 0, 0.5*W],\n [0, focal, 0.5*H],\n [0, 0, 1]\n ])\n\n if len(K.shape) == 2:\n Ks = K[None].repeat(len(poses), axis=0)\n else:\n Ks = K\n\n render_poses = render_poses[...,:4]\n if K_render is None:\n K_render = K\n if len(K_render.shape) == 2:\n Ks_render = K_render[None].repeat(len(render_poses), axis=0)\n else:\n Ks_render = K_render\n\n data_dict = dict(\n hwf=hwf, HW=HW, Ks=Ks, Ks_render=Ks_render,\n near=near, far=far, near_clip=near_clip,\n i_train=i_train, i_val=i_val, i_test=i_test,\n poses=poses, render_poses=render_poses,\n images=images, depths=depths,\n irregular_shape=irregular_shape,\n )\n return data_dict" } ]
import os, sys, copy, glob, json, time, random, argparse import math import mmcv import imageio import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import matplotlib.pyplot as plt from shutil import copyfile from tqdm import tqdm, trange from lib import utils, dvgo, dmpigo, dpvgo from lib.load_data import load_data from torch_efficient_distloss import flatten_eff_distloss from PIL import Image
5,166
psnrs.append(p) if eval_ssim: ssims.append(utils.rgb_ssim(rgb, gt_imgs[i], max_val=1)) if len(psnrs): print('Testing psnr', np.mean(psnrs), '(avg)') if eval_ssim: print('Testing ssim', np.mean(ssims), '(avg)') if render_video_flipy: for i in range(len(rgbs)): rgbs[i] = np.flip(rgbs[i], axis=0) depths[i] = np.flip(depths[i], axis=0) bgmaps[i] = np.flip(bgmaps[i], axis=0) if render_video_rot90 != 0: for i in range(len(rgbs)): rgbs[i] = np.rot90(rgbs[i], k=render_video_rot90, axes=(0,1)) depths[i] = np.rot90(depths[i], k=render_video_rot90, axes=(0,1)) bgmaps[i] = np.rot90(bgmaps[i], k=render_video_rot90, axes=(0,1)) if savedir is not None and dump_images: for i in trange(len(rgbs)): rgb8 = utils.to8b(rgbs[i]) filename = os.path.join(savedir, '{:03d}.png'.format(i)) imageio.imwrite(filename, rgb8) rgbs = np.array(rgbs) depths = np.array(depths) bgmaps = np.array(bgmaps) return rgbs, depths, bgmaps def seed_everything(): '''Seed everything for better reproducibility. (some pytorch operation is non-deterministic like the backprop of grid_samples) ''' torch.manual_seed(args.seed) np.random.seed(args.seed) random.seed(args.seed) def load_everything(args, cfg): '''Load images / poses / camera settings / data split. ''' data_dict = load_data(cfg.data) # remove useless field kept_keys = { 'hwf', 'HW', 'Ks', 'Ks_render', 'near', 'far', 'near_clip', 'i_train', 'i_val', 'i_test', 'irregular_shape', 'poses', 'render_poses', 'images'} for k in list(data_dict.keys()): if k not in kept_keys: data_dict.pop(k) # construct data tensor if data_dict['irregular_shape']: data_dict['images'] = [torch.FloatTensor(im, device='cpu') for im in data_dict['images']] else: data_dict['images'] = torch.FloatTensor(data_dict['images'], device='cpu') data_dict['poses'] = torch.Tensor(data_dict['poses']) return data_dict def _compute_bbox_by_cam_frustrm_bounded(cfg, HW, Ks, poses, i_train, near, far): xyz_min = torch.Tensor([np.inf, np.inf, np.inf]) xyz_max = -xyz_min for (H, W), K, c2w in zip(HW[i_train], Ks[i_train], poses[i_train]): rays_o, rays_d, viewdirs = dvgo.get_rays_of_a_view( H=H, W=W, K=K, c2w=c2w, ndc=cfg.data.ndc, inverse_y=cfg.data.inverse_y, flip_x=cfg.data.flip_x, flip_y=cfg.data.flip_y) if cfg.data.ndc: pts_nf = torch.stack([rays_o+rays_d*near, rays_o+rays_d*far]) else: pts_nf = torch.stack([rays_o+viewdirs*near, rays_o+viewdirs*far]) xyz_min = torch.minimum(xyz_min, pts_nf.amin((0,1,2))) xyz_max = torch.maximum(xyz_max, pts_nf.amax((0,1,2))) return xyz_min, xyz_max def compute_bbox_by_cam_frustrm(args, cfg, HW, Ks, poses, i_train, near, far, **kwargs): print('compute_bbox_by_cam_frustrm: start') if cfg.data.panorama: xyz_min, xyz_max = -torch.tensor([far, far, far]).float(), torch.tensor([far, far, far]).float() else: xyz_min, xyz_max = _compute_bbox_by_cam_frustrm_bounded( cfg, HW, Ks, poses, i_train, near, far) print('compute_bbox_by_cam_frustrm: xyz_min', xyz_min) print('compute_bbox_by_cam_frustrm: xyz_max', xyz_max) print('compute_bbox_by_cam_frustrm: finish') return xyz_min, xyz_max def create_new_model(cfg, cfg_model, cfg_train, xyz_min, xyz_max, stage, coarse_ckpt_path): model_kwargs = copy.deepcopy(cfg_model) num_voxels = model_kwargs.pop('num_voxels') if len(cfg_train.pg_scale): num_voxels = int(num_voxels / (2**len(cfg_train.pg_scale))) if cfg.fine_model_and_render.image_size: image_size = model_kwargs.pop('image_size') if len(cfg_train.pg_image_scale): image_size = (image_size[0] // (2**len(cfg_train.pg_image_scale)), image_size[1] // (2**len(cfg_train.pg_image_scale))) 
model_kwargs['image_size'] = image_size if cfg.fine_model_and_render.equ_size: equ_size = model_kwargs.pop('equ_size') if len(cfg_train.pg_equ_scale): equ_size = (equ_size[0] // (2**len(cfg_train.pg_equ_scale)), equ_size[1] // (2**len(cfg_train.pg_equ_scale))) model_kwargs['equ_size'] = equ_size if cfg.data.ndc: print(f'scene_rep_reconstruction ({stage}): \033[96muse multiplane images\033[0m') model = dmpigo.DirectMPIGO( xyz_min=xyz_min, xyz_max=xyz_max, num_voxels=num_voxels, **model_kwargs) elif cfg.data.panorama:
def config_parser(): '''Define command line arguments ''' parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('--config', required=True, help='config file path') parser.add_argument("--seed", type=int, default=777, help='Random seed') parser.add_argument("--no_reload", action='store_true', help='do not reload weights from saved ckpt') parser.add_argument("--no_reload_optimizer", action='store_true', help='do not reload optimizer state from saved ckpt') parser.add_argument("--ft_path", type=str, default='', help='specific weights npy file to reload for coarse network') # testing options parser.add_argument("--render_only", action='store_true', help='do not optimize, reload weights and render out render_poses path') parser.add_argument("--render_test", action='store_true') parser.add_argument("--render_train", action='store_true') parser.add_argument("--render_video", action='store_true') parser.add_argument("--render_image", action='store_true') parser.add_argument("--render_video_flipy", action='store_true') parser.add_argument("--render_video_rot90", default=0, type=int) parser.add_argument("--render_video_factor", type=float, default=0, help='downsampling factor to speed up rendering, set 4 or 8 for fast preview') parser.add_argument("--dump_images", action='store_true') parser.add_argument("--eval_ssim", action='store_true') parser.add_argument("--edit", type=str, default='', help='filename of edited k0_xxx.png') parser.add_argument("--render_panorama", action='store_true') # logging/saving options parser.add_argument("--i_print", type=int, default=500, help='frequency of console printout and metric loggin') parser.add_argument("--i_weights", type=int, default=100000, help='frequency of weight ckpt saving') return parser @torch.no_grad() def render_viewpoints(model, render_poses, HW, Ks, ndc, render_kwargs, gt_imgs=None, savedir=None, dump_images=False, render_factor=0, render_video_flipy=False, render_video_rot90=0, eval_ssim=False, render_panorama=False): '''Render images for the given viewpoints; run evaluation if gt given. ''' assert len(render_poses) == len(HW) and len(HW) == len(Ks) if render_factor!=0: HW = np.copy(HW) Ks = np.copy(Ks) HW = (HW/render_factor).astype(int) Ks[:, :2, :3] /= render_factor rgbs = [] depths = [] bgmaps = [] psnrs = [] ssims = [] for i, c2w in enumerate(tqdm(render_poses)): H, W = HW[i] K = Ks[i] c2w = torch.Tensor(c2w) if not render_panorama: rays_o, rays_d, viewdirs = dvgo.get_rays_of_a_view( H, W, K, c2w, ndc, inverse_y=render_kwargs['inverse_y'], flip_x=cfg.data.flip_x, flip_y=cfg.data.flip_y) else: rays_o, rays_d, viewdirs = dvgo.get_ray_of_a_panorama( H, W, c2w ) keys = ['rgb_marched', 'depth', 'alphainv_last'] rays_o = rays_o.flatten(0,-2) rays_d = rays_d.flatten(0,-2) viewdirs = viewdirs.flatten(0,-2) render_result_chunks = [ {k: v for k, v in model(ro, rd, vd, **render_kwargs).items() if k in keys} for ro, rd, vd in zip(rays_o.split(8192, 0), rays_d.split(8192, 0), viewdirs.split(8192, 0)) ] render_result = { k: torch.cat([ret[k] for ret in render_result_chunks]).reshape(H,W,-1) for k in render_result_chunks[0].keys() } rgb = render_result['rgb_marched'].cpu().numpy() depth = render_result['depth'].cpu().numpy() bgmap = render_result['alphainv_last'].cpu().numpy() rgbs.append(rgb) depths.append(depth) bgmaps.append(bgmap) if i==0: print('Testing', rgb.shape) if gt_imgs is not None and render_factor==0: p = -10. 
* np.log10(np.mean(np.square(rgb - gt_imgs[i]))) psnrs.append(p) if eval_ssim: ssims.append(utils.rgb_ssim(rgb, gt_imgs[i], max_val=1)) if len(psnrs): print('Testing psnr', np.mean(psnrs), '(avg)') if eval_ssim: print('Testing ssim', np.mean(ssims), '(avg)') if render_video_flipy: for i in range(len(rgbs)): rgbs[i] = np.flip(rgbs[i], axis=0) depths[i] = np.flip(depths[i], axis=0) bgmaps[i] = np.flip(bgmaps[i], axis=0) if render_video_rot90 != 0: for i in range(len(rgbs)): rgbs[i] = np.rot90(rgbs[i], k=render_video_rot90, axes=(0,1)) depths[i] = np.rot90(depths[i], k=render_video_rot90, axes=(0,1)) bgmaps[i] = np.rot90(bgmaps[i], k=render_video_rot90, axes=(0,1)) if savedir is not None and dump_images: for i in trange(len(rgbs)): rgb8 = utils.to8b(rgbs[i]) filename = os.path.join(savedir, '{:03d}.png'.format(i)) imageio.imwrite(filename, rgb8) rgbs = np.array(rgbs) depths = np.array(depths) bgmaps = np.array(bgmaps) return rgbs, depths, bgmaps def seed_everything(): '''Seed everything for better reproducibility. (some pytorch operation is non-deterministic like the backprop of grid_samples) ''' torch.manual_seed(args.seed) np.random.seed(args.seed) random.seed(args.seed) def load_everything(args, cfg): '''Load images / poses / camera settings / data split. ''' data_dict = load_data(cfg.data) # remove useless field kept_keys = { 'hwf', 'HW', 'Ks', 'Ks_render', 'near', 'far', 'near_clip', 'i_train', 'i_val', 'i_test', 'irregular_shape', 'poses', 'render_poses', 'images'} for k in list(data_dict.keys()): if k not in kept_keys: data_dict.pop(k) # construct data tensor if data_dict['irregular_shape']: data_dict['images'] = [torch.FloatTensor(im, device='cpu') for im in data_dict['images']] else: data_dict['images'] = torch.FloatTensor(data_dict['images'], device='cpu') data_dict['poses'] = torch.Tensor(data_dict['poses']) return data_dict def _compute_bbox_by_cam_frustrm_bounded(cfg, HW, Ks, poses, i_train, near, far): xyz_min = torch.Tensor([np.inf, np.inf, np.inf]) xyz_max = -xyz_min for (H, W), K, c2w in zip(HW[i_train], Ks[i_train], poses[i_train]): rays_o, rays_d, viewdirs = dvgo.get_rays_of_a_view( H=H, W=W, K=K, c2w=c2w, ndc=cfg.data.ndc, inverse_y=cfg.data.inverse_y, flip_x=cfg.data.flip_x, flip_y=cfg.data.flip_y) if cfg.data.ndc: pts_nf = torch.stack([rays_o+rays_d*near, rays_o+rays_d*far]) else: pts_nf = torch.stack([rays_o+viewdirs*near, rays_o+viewdirs*far]) xyz_min = torch.minimum(xyz_min, pts_nf.amin((0,1,2))) xyz_max = torch.maximum(xyz_max, pts_nf.amax((0,1,2))) return xyz_min, xyz_max def compute_bbox_by_cam_frustrm(args, cfg, HW, Ks, poses, i_train, near, far, **kwargs): print('compute_bbox_by_cam_frustrm: start') if cfg.data.panorama: xyz_min, xyz_max = -torch.tensor([far, far, far]).float(), torch.tensor([far, far, far]).float() else: xyz_min, xyz_max = _compute_bbox_by_cam_frustrm_bounded( cfg, HW, Ks, poses, i_train, near, far) print('compute_bbox_by_cam_frustrm: xyz_min', xyz_min) print('compute_bbox_by_cam_frustrm: xyz_max', xyz_max) print('compute_bbox_by_cam_frustrm: finish') return xyz_min, xyz_max def create_new_model(cfg, cfg_model, cfg_train, xyz_min, xyz_max, stage, coarse_ckpt_path): model_kwargs = copy.deepcopy(cfg_model) num_voxels = model_kwargs.pop('num_voxels') if len(cfg_train.pg_scale): num_voxels = int(num_voxels / (2**len(cfg_train.pg_scale))) if cfg.fine_model_and_render.image_size: image_size = model_kwargs.pop('image_size') if len(cfg_train.pg_image_scale): image_size = (image_size[0] // (2**len(cfg_train.pg_image_scale)), image_size[1] // 
(2**len(cfg_train.pg_image_scale))) model_kwargs['image_size'] = image_size if cfg.fine_model_and_render.equ_size: equ_size = model_kwargs.pop('equ_size') if len(cfg_train.pg_equ_scale): equ_size = (equ_size[0] // (2**len(cfg_train.pg_equ_scale)), equ_size[1] // (2**len(cfg_train.pg_equ_scale))) model_kwargs['equ_size'] = equ_size if cfg.data.ndc: print(f'scene_rep_reconstruction ({stage}): \033[96muse multiplane images\033[0m') model = dmpigo.DirectMPIGO( xyz_min=xyz_min, xyz_max=xyz_max, num_voxels=num_voxels, **model_kwargs) elif cfg.data.panorama:
model = dpvgo.DirectPanoramaVoxGO(
3
2023-12-11 05:49:46+00:00
8k
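An illustrative side note, separate from the record above: render_viewpoints scores each rendered image against ground truth with PSNR computed as -10 * log10(MSE) for values in [0, 1]. A small self-contained version of that metric is sketched here; the helper name and the toy inputs are mine.

import numpy as np

def psnr(pred: np.ndarray, gt: np.ndarray) -> float:
    # mean squared error over all pixels/channels, converted to decibels
    mse = np.mean(np.square(pred - gt))
    return float(-10.0 * np.log10(mse))

pred = np.random.rand(8, 8, 3)
print(psnr(pred, pred + 0.01))  # a uniform 0.01 error gives 40 dB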
KAIST-VICLab/From_Ground_To_Objects
datasets/kitti_dataset.py
[ { "identifier": "generate_depth_map", "path": "datasets/kitti_utils.py", "snippet": "def generate_depth_map(calib_dir, velo_filename, cam=2, vel_depth=False):\n \"\"\"Generate a depth map from velodyne data\n \"\"\"\n # load calibration files\n cam2cam = read_calib_file(os.path.join(calib_dir, 'calib_cam_to_cam.txt'))\n velo2cam = read_calib_file(os.path.join(calib_dir, 'calib_velo_to_cam.txt'))\n velo2cam = np.hstack((velo2cam['R'].reshape(3, 3), velo2cam['T'][..., np.newaxis]))\n velo2cam = np.vstack((velo2cam, np.array([0, 0, 0, 1.0])))\n\n # get image shape\n im_shape = cam2cam[\"S_rect_02\"][::-1].astype(np.int32)\n\n # compute projection matrix velodyne->image plane\n R_cam2rect = np.eye(4)\n R_cam2rect[:3, :3] = cam2cam['R_rect_00'].reshape(3, 3)\n P_rect = cam2cam['P_rect_0'+str(cam)].reshape(3, 4)\n P_velo2im = np.dot(np.dot(P_rect, R_cam2rect), velo2cam)\n\n # load velodyne points and remove all behind image plane (approximation)\n # each row of the velodyne data is forward, left, up, reflectance\n velo = load_velodyne_points(velo_filename)\n velo = velo[velo[:, 0] >= 0, :]\n\n # project the points to the camera\n velo_pts_im = np.dot(P_velo2im, velo.T).T\n velo_pts_im[:, :2] = velo_pts_im[:, :2] / velo_pts_im[:, 2][..., np.newaxis]\n\n if vel_depth:\n velo_pts_im[:, 2] = velo[:, 0]\n\n # check if in bounds\n # use minus 1 to get the exact same value as KITTI matlab code\n velo_pts_im[:, 0] = np.round(velo_pts_im[:, 0]) - 1\n velo_pts_im[:, 1] = np.round(velo_pts_im[:, 1]) - 1\n val_inds = (velo_pts_im[:, 0] >= 0) & (velo_pts_im[:, 1] >= 0)\n val_inds = val_inds & (velo_pts_im[:, 0] < im_shape[1]) & (velo_pts_im[:, 1] < im_shape[0])\n velo_pts_im = velo_pts_im[val_inds, :]\n\n # project to image\n depth = np.zeros((im_shape[:2]))\n depth[velo_pts_im[:, 1].astype(np.int), velo_pts_im[:, 0].astype(np.int)] = velo_pts_im[:, 2]\n\n # find the duplicate points and choose the closest depth\n inds = sub2ind(depth.shape, velo_pts_im[:, 1], velo_pts_im[:, 0])\n dupe_inds = [item for item, count in Counter(inds).items() if count > 1]\n for dd in dupe_inds:\n pts = np.where(inds == dd)[0]\n x_loc = int(velo_pts_im[pts[0], 0])\n y_loc = int(velo_pts_im[pts[0], 1])\n depth[y_loc, x_loc] = velo_pts_im[pts, 2].min()\n depth[depth < 0] = 0\n\n return depth" }, { "identifier": "MonoDataset", "path": "datasets/mono_dataset.py", "snippet": "class MonoDataset(data.Dataset):\n \"\"\"Superclass for monocular dataloaders\n \"\"\"\n def __init__(self,\n data_path,\n filenames,\n height,\n width,\n frame_idxs,\n num_scales,\n is_train=False,\n img_ext='.png',\n mask_noise=False,\n demo=False\n ):\n super(MonoDataset, self).__init__()\n\n self.data_path = data_path\n self.filenames = filenames\n self.height = height\n self.width = width\n self.num_scales = num_scales\n\n self.interp = Image.ANTIALIAS\n\n self.frame_idxs = frame_idxs\n\n self.is_train = is_train\n self.img_ext = img_ext\n\n self.loader = pil_loader\n self.to_tensor = transforms.ToTensor()\n self.mask_noise = mask_noise\n self.demo = demo\n\n # We need to specify augmentations differently in newer versions of torchvision.\n # We first try the newer tuple version; if this fails we fall back to scalars\n try:\n self.brightness = (0.8, 1.2)\n self.contrast = (0.8, 1.2)\n self.saturation = (0.8, 1.2)\n self.hue = (-0.1, 0.1)\n transforms.ColorJitter.get_params(\n self.brightness, self.contrast, self.saturation, self.hue)\n except TypeError:\n self.brightness = 0.2\n self.contrast = 0.2\n self.saturation = 0.2\n self.hue = 0.1\n\n self.resize 
= {}\n for i in range(self.num_scales):\n s = 2 ** i\n self.resize[i] = transforms.Resize((self.height // s, self.width // s),\n interpolation=self.interp)\n\n self.load_depth = self.check_depth()\n\n def preprocess(self, inputs, color_aug):\n \"\"\"Resize colour images to the required scales and augment if required\n\n We create the color_aug object in advance and apply the same augmentation to all\n images in this item. This ensures that all images input to the pose network receive the\n same augmentation.\n \"\"\"\n for k in list(inputs):\n if \"color\" in k:\n n, im, i = k\n for i in range(self.num_scales):\n inputs[(n, im, i)] = self.resize[i](inputs[(n, im, i - 1)])\n\n for k in list(inputs):\n f = inputs[k]\n if \"color\" in k:\n n, im, i = k\n inputs[(n, im, i)] = self.to_tensor(f)\n # check it isn't a blank frame - keep _aug as zeros so we can check for it\n if inputs[(n, im, i)].sum() == 0:\n inputs[(n + \"_aug\", im, i)] = inputs[(n, im, i)]\n else:\n inputs[(n + \"_aug\", im, i)] = self.to_tensor(color_aug(f))\n\n if self.mask_noise:\n inputs[\"doj_mask\"] = self.resize[0](inputs[\"doj_mask\"]); inputs[\"doj_mask\"] = self.to_tensor(inputs[\"doj_mask\"])\n if type(self).__name__ in [\"CityscapesPreprocessedDataset\", \"KITTIRAWDataset\"]:\n inputs[\"doj_mask-1\"] = self.resize[0](inputs[\"doj_mask-1\"]); inputs[\"doj_mask-1\"] = self.to_tensor(inputs[\"doj_mask-1\"])\n inputs[\"doj_mask+1\"] = self.resize[0](inputs[\"doj_mask+1\"]); inputs[\"doj_mask+1\"] = self.to_tensor(inputs[\"doj_mask+1\"])\n\n\n def __len__(self):\n return len(self.filenames)\n\n def load_intrinsics(self, folder, frame_index):\n return self.K.copy()\n\n def __getitem__(self, index):\n \"\"\"Returns a single training item from the dataset as a dictionary.\n\n Values correspond to torch tensors.\n Keys in the dictionary are either strings or tuples:\n\n (\"color\", <frame_id>, <scale>) for raw colour images,\n (\"color_aug\", <frame_id>, <scale>) for augmented colour images,\n (\"K\", scale) or (\"inv_K\", scale) for camera intrinsics,\n \"depth_gt\" for ground truth depth maps\n\n <frame_id> is:\n an integer (e.g. 
0, -1, or 1) representing the temporal step relative to 'index',\n\n <scale> is an integer representing the scale of the image relative to the fullsize image:\n -1 images at native resolution as loaded from disk\n 0 images resized to (self.width, self.height )\n 1 images resized to (self.width // 2, self.height // 2)\n 2 images resized to (self.width // 4, self.height // 4)\n 3 images resized to (self.width // 8, self.height // 8)\n \"\"\"\n inputs = {}\n\n do_color_aug = self.is_train and random.random() > 0.5\n do_flip = self.is_train and random.random() > 0.5\n inputs[\"flip\"] = do_flip\n\n folder, frame_index, side = self.index_to_folder_and_frame_idx(index)\n\n poses = {}\n if type(self).__name__ in [\"CityscapesPreprocessedDataset\", \"CityscapesEvalDataset\"]:\n inputs.update(self.get_colors(folder, frame_index, side, do_flip))\n if type(self).__name__ in [\"CityscapesPreprocessedDataset\"]:\n inputs[(\"color_path\", 0)] = self.get_color_path(\n folder, frame_index)\n if self.mask_noise:\n inputs.update(self.get_doj_mask(folder, frame_index, side, do_flip))\n else:\n for i in self.frame_idxs:\n if i == \"s\":\n other_side = {\"r\": \"l\", \"l\": \"r\"}[side]\n inputs[(\"color\", i, -1)] = self.get_color(\n folder, frame_index, other_side, do_flip)\n inputs[(\"color_path\", i)] = self.get_color_path(\n folder, frame_index, other_side)\n else:\n try:\n inputs[(\"color\", i, -1)] = self.get_color(\n folder, frame_index + i, side, do_flip)\n inputs[(\"color_path\", i)] = self.get_color_path(\n folder, frame_index, side)\n except FileNotFoundError as e:\n if i != 0:\n # fill with dummy values\n inputs[(\"color\", i, -1)] = \\\n Image.fromarray(np.zeros((100, 100, 3)).astype(np.uint8))\n poses[i] = None\n inputs[(\"color_path\", i)] = \"\"\n # print(\"no file!!!!\")\n else:\n raise FileNotFoundError(f'Cannot find frame - make sure your '\n f'--data_path is set correctly, or try adding'\n f' the --png flag. {e}')\n\n # adjusting intrinsics to match each scale in the pyramid\n for scale in range(self.num_scales):\n K = self.load_intrinsics(folder, frame_index)\n\n K[0, :] *= self.width // (2 ** scale)\n K[1, :] *= self.height // (2 ** scale)\n\n inv_K = np.linalg.pinv(K)\n\n inputs[(\"K\", scale)] = torch.from_numpy(K)\n inputs[(\"inv_K\", scale)] = torch.from_numpy(inv_K)\n\n if do_color_aug:\n color_aug = transforms.ColorJitter(self.brightness, self.contrast, self.saturation, self.hue)\n else:\n color_aug = (lambda x: x)\n\n self.preprocess(inputs, color_aug)\n\n if type(self).__name__ not in [\"CityscapesEvalDataset\"]:\n for i in self.frame_idxs:\n del inputs[(\"color\", i, -1)]\n del inputs[(\"color_aug\", i, -1)]\n\n if self.load_depth and False:\n depth_gt = self.get_depth(folder, frame_index, side, do_flip)\n inputs[\"depth_gt\"] = np.expand_dims(depth_gt, 0)\n inputs[\"depth_gt\"] = torch.from_numpy(inputs[\"depth_gt\"].astype(np.float32))\n\n return inputs\n\n def get_color(self, folder, frame_index, side, do_flip):\n raise NotImplementedError\n\n def check_depth(self):\n raise NotImplementedError\n\n def get_depth(self, folder, frame_index, side, do_flip):\n raise NotImplementedError" } ]
import os
import skimage.transform
import numpy as np
import PIL.Image as pil
from .kitti_utils import generate_depth_map
from .mono_dataset import MonoDataset
3,825
# Copyright Niantic 2021. Patent Pending. All rights reserved. # # This software is licensed under the terms of the ManyDepth licence # which allows for non-commercial use only, the full terms of which are made # available in the LICENSE file. os.environ["MKL_NUM_THREADS"] = "1" # noqa F402 os.environ["NUMEXPR_NUM_THREADS"] = "1" # noqa F402 os.environ["OMP_NUM_THREADS"] = "1" # noqa F402 class KITTIDataset(MonoDataset): """Superclass for different types of KITTI dataset loaders """ def __init__(self, *args, **kwargs): super(KITTIDataset, self).__init__(*args, **kwargs) # NOTE: Make sure your intrinsics matrix is *normalized* by the original image size self.K = np.array([[0.58, 0, 0.5, 0], [0, 1.92, 0.5, 0], [0, 0, 1, 0], [0, 0, 0, 1]], dtype=np.float32) self.full_res_shape = (1242, 375) self.side_map = {"2": 2, "3": 3, "l": 2, "r": 3} def check_depth(self): line = self.filenames[0].split() scene_name = line[0] frame_index = int(line[1]) velo_filename = os.path.join( self.data_path, scene_name, "velodyne_points/data/{:010d}.bin".format(int(frame_index))) return os.path.isfile(velo_filename) def index_to_folder_and_frame_idx(self, index): """Convert index in the dataset to a folder name, frame_idx and any other bits """ line = self.filenames[index].split() folder = line[0] if len(line) == 3: frame_index = int(line[1]) else: frame_index = 0 if len(line) == 3: side = line[2] else: side = None return folder, frame_index, side def get_color(self, folder, frame_index, side, do_flip): color = self.loader(self.get_image_path(folder, frame_index, side)) if do_flip: color = color.transpose(pil.FLIP_LEFT_RIGHT) return color # added part!! def get_color_path(self, folder, frame_index, side): # return self.get_image_path(folder, frame_index, side) f_str = "{:010d}".format(frame_index) image_path = os.path.join(folder, "image_0{}/data".format(self.side_map[side]), f_str) return image_path class KITTIRAWDataset(KITTIDataset): """KITTI dataset which loads the original velodyne depth maps for ground truth """ def __init__(self, *args, **kwargs): super(KITTIRAWDataset, self).__init__(*args, **kwargs) def get_image_path(self, folder, frame_index, side): f_str = "{:010d}{}".format(frame_index, self.img_ext) image_path = os.path.join( self.data_path, folder, "image_0{}/data".format(self.side_map[side]), f_str) return image_path def get_depth(self, folder, frame_index, side, do_flip): calib_path = os.path.join(self.data_path, folder.split("/")[0]) velo_filename = os.path.join( self.data_path, folder, "velodyne_points/data/{:010d}.bin".format(int(frame_index)))
# Copyright Niantic 2021. Patent Pending. All rights reserved. # # This software is licensed under the terms of the ManyDepth licence # which allows for non-commercial use only, the full terms of which are made # available in the LICENSE file. os.environ["MKL_NUM_THREADS"] = "1" # noqa F402 os.environ["NUMEXPR_NUM_THREADS"] = "1" # noqa F402 os.environ["OMP_NUM_THREADS"] = "1" # noqa F402 class KITTIDataset(MonoDataset): """Superclass for different types of KITTI dataset loaders """ def __init__(self, *args, **kwargs): super(KITTIDataset, self).__init__(*args, **kwargs) # NOTE: Make sure your intrinsics matrix is *normalized* by the original image size self.K = np.array([[0.58, 0, 0.5, 0], [0, 1.92, 0.5, 0], [0, 0, 1, 0], [0, 0, 0, 1]], dtype=np.float32) self.full_res_shape = (1242, 375) self.side_map = {"2": 2, "3": 3, "l": 2, "r": 3} def check_depth(self): line = self.filenames[0].split() scene_name = line[0] frame_index = int(line[1]) velo_filename = os.path.join( self.data_path, scene_name, "velodyne_points/data/{:010d}.bin".format(int(frame_index))) return os.path.isfile(velo_filename) def index_to_folder_and_frame_idx(self, index): """Convert index in the dataset to a folder name, frame_idx and any other bits """ line = self.filenames[index].split() folder = line[0] if len(line) == 3: frame_index = int(line[1]) else: frame_index = 0 if len(line) == 3: side = line[2] else: side = None return folder, frame_index, side def get_color(self, folder, frame_index, side, do_flip): color = self.loader(self.get_image_path(folder, frame_index, side)) if do_flip: color = color.transpose(pil.FLIP_LEFT_RIGHT) return color # added part!! def get_color_path(self, folder, frame_index, side): # return self.get_image_path(folder, frame_index, side) f_str = "{:010d}".format(frame_index) image_path = os.path.join(folder, "image_0{}/data".format(self.side_map[side]), f_str) return image_path class KITTIRAWDataset(KITTIDataset): """KITTI dataset which loads the original velodyne depth maps for ground truth """ def __init__(self, *args, **kwargs): super(KITTIRAWDataset, self).__init__(*args, **kwargs) def get_image_path(self, folder, frame_index, side): f_str = "{:010d}{}".format(frame_index, self.img_ext) image_path = os.path.join( self.data_path, folder, "image_0{}/data".format(self.side_map[side]), f_str) return image_path def get_depth(self, folder, frame_index, side, do_flip): calib_path = os.path.join(self.data_path, folder.split("/")[0]) velo_filename = os.path.join( self.data_path, folder, "velodyne_points/data/{:010d}.bin".format(int(frame_index)))
depth_gt = generate_depth_map(calib_path, velo_filename, self.side_map[side])
0
2023-12-12 08:29:30+00:00
8k
LIU-Yuxin/SyncMVD
src/renderer/project.py
[ { "identifier": "HardGeometryShader", "path": "src/renderer/geometry.py", "snippet": "class HardGeometryShader(ShaderBase):\n\t\"\"\"\n\trenders common geometric informations.\n\t\n\t\n\t\"\"\"\n\n\tdef forward(self, fragments, meshes, **kwargs):\n\t\tcameras = super()._get_cameras(**kwargs)\n\t\ttexels = self.texel_from_uv(fragments, meshes)\n\n\t\tlights = kwargs.get(\"lights\", self.lights)\n\t\tmaterials = kwargs.get(\"materials\", self.materials)\n\t\tblend_params = kwargs.get(\"blend_params\", self.blend_params)\n\t\tverts, normals, depths, cos_angles = _geometry_shading_with_pixels(\n\t\t\tmeshes=meshes,\n\t\t\tfragments=fragments,\n\t\t\ttexels=texels,\n\t\t\tlights=lights,\n\t\t\tcameras=cameras,\n\t\t\tmaterials=materials,\n\t\t)\n\t\tverts = hard_rgb_blend(verts, fragments, blend_params)\n\t\tnormals = hard_rgb_blend(normals, fragments, blend_params)\n\t\tdepths = hard_rgb_blend(depths, fragments, blend_params)\n\t\tcos_angles = hard_rgb_blend(cos_angles, fragments, blend_params)\n\t\ttexels = hard_rgb_blend(texels, fragments, blend_params)\n\t\treturn verts, normals, depths, cos_angles, texels, fragments\n\n\tdef texel_from_uv(self, fragments, meshes):\n\t\ttexture_tmp = meshes.textures\n\t\tmaps_tmp = texture_tmp.maps_padded()\n\t\tuv_color = [ [[1,0],[1,1]],[[0,0],[0,1]] ]\n\t\tuv_color = torch.FloatTensor(uv_color).to(maps_tmp[0].device).type(maps_tmp[0].dtype)\n\t\tuv_texture = TexturesUV([uv_color.clone() for t in maps_tmp], texture_tmp.faces_uvs_padded(), texture_tmp.verts_uvs_padded(), sampling_mode=\"bilinear\")\n\t\tmeshes.textures = uv_texture\n\t\ttexels = meshes.sample_textures(fragments)\n\t\tmeshes.textures = texture_tmp\n\t\ttexels = torch.cat((texels, texels[...,-1:]*0), dim=-1)\n\t\treturn texels" }, { "identifier": "HardNChannelFlatShader", "path": "src/renderer/shader.py", "snippet": "class HardNChannelFlatShader(ShaderBase):\n\t\"\"\"\n\tPer face lighting - the lighting model is applied using the average face\n\tposition and the face normal. The blending function hard assigns\n\tthe color of the closest face for each pixel.\n\n\tTo use the default values, simply initialize the shader with the desired\n\tdevice e.g.\n\n\t.. 
code-block::\n\n\t\tshader = HardFlatShader(device=torch.device(\"cuda:0\"))\n\t\"\"\"\n\n\tdef __init__(\n\t\tself,\n\t\tdevice = \"cpu\",\n\t\tcameras: Optional[TensorProperties] = None,\n\t\tlights: Optional[TensorProperties] = None,\n\t\tmaterials: Optional[Materials] = None,\n\t\tblend_params: Optional[BlendParams] = None,\n\t\tchannels: int = 3,\n\t):\n\t\tself.channels = channels\n\t\tones = ((1.0,)*channels,)\n\t\tzeros = ((0.0,)*channels,)\n\t\t\n\t\tif not isinstance(lights, AmbientLights) or not lights.ambient_color.shape[-1] == channels:\n\t\t\tlights = AmbientLights(\n\t\t\t\tambient_color=ones,\n\t\t\t\tdevice=device,\n\t\t\t)\n\n\t\tif not materials or not materials.ambient_color.shape[-1] == channels:\n\t\t\tmaterials = Materials(\n\t\t\t\tdevice=device,\n\t\t\t\tdiffuse_color=zeros,\n\t\t\t\tambient_color=ones,\n\t\t\t\tspecular_color=zeros,\n\t\t\t\tshininess=0.0,\n\t\t\t)\n\n\t\tblend_params_new = BlendParams(background_color=(1.0,)*channels)\n\t\tif not isinstance(blend_params, BlendParams):\n\t\t\tblend_params = blend_params_new\n\t\telse:\n\t\t\tbackground_color_ = blend_params.background_color\n\t\t\tif isinstance(background_color_, Sequence[float]) and not len(background_color_) == channels:\n\t\t\t\tblend_params = blend_params_new\n\t\t\tif isinstance(background_color_, torch.Tensor) and not background_color_.shape[-1] == channels:\n\t\t\t\tblend_params = blend_params_new\n\n\t\tsuper().__init__(\n\t\t\tdevice,\n\t\t\tcameras,\n\t\t\tlights,\n\t\t\tmaterials,\n\t\t\tblend_params,\n\t\t)\n\t\t\n\n\tdef forward(self, fragments: Fragments, meshes: Meshes, **kwargs) -> torch.Tensor:\n\t\tcameras = super()._get_cameras(**kwargs)\n\t\ttexels = meshes.sample_textures(fragments)\n\t\tlights = kwargs.get(\"lights\", self.lights)\n\t\tmaterials = kwargs.get(\"materials\", self.materials)\n\t\tblend_params = kwargs.get(\"blend_params\", self.blend_params)\n\t\tcolors = flat_shading(\n\t\t\tmeshes=meshes,\n\t\t\tfragments=fragments,\n\t\t\ttexels=texels,\n\t\t\tlights=lights,\n\t\t\tcameras=cameras,\n\t\t\tmaterials=materials,\n\t\t)\n\t\timages = hard_rgb_blend(colors, fragments, blend_params)\n\t\treturn images" }, { "identifier": "voronoi_solve", "path": "src/renderer/voronoi.py", "snippet": "def voronoi_solve(texture, mask):\n '''\n This is a warpper of the original cupy voronoi implementation\n The texture color where mask value is 1 will propagate to its\n neighbors.\n args:\n texture - A multi-channel tensor, (H, W, C)\n mask - A single-channel tensor, (H, W)\n return:\n texture - Propagated tensor\n '''\n h, w, c = texture.shape\n # hwc_texture = texture.permute(1,2,0)\n valid_pix_coord = torch.where(mask>0)\n\n indices = torch.arange(0, h*w).cuda().reshape(h, w)\n idx_map = -1 * torch.ones((h,w), dtype=torch.int64).cuda()\n idx_map[valid_pix_coord] = indices[valid_pix_coord]\n\n ping = cp.asarray(idx_map)\n pong = cp.copy(ping)\n ping = JFAVoronoiDiagram(ping, pong)\n\n voronoi_map = torch.as_tensor(ping, device=\"cuda\")\n nc_voronoi_texture = torch.index_select(texture.reshape(h*w, c), 0, voronoi_map.reshape(h*w))\n voronoi_texture = nc_voronoi_texture.reshape(h, w, c)\n\n return voronoi_texture" } ]
import torch
import pytorch3d
import xatlas
import numpy as np
from pytorch3d.io import load_objs_as_meshes, load_obj, save_obj, IO
from pytorch3d.structures import Meshes
from pytorch3d.renderer import (
    look_at_view_transform,
    FoVPerspectiveCameras,
    FoVOrthographicCameras,
    AmbientLights,
    PointLights,
    DirectionalLights,
    Materials,
    RasterizationSettings,
    MeshRenderer,
    MeshRasterizer,
    TexturesUV
)
from .geometry import HardGeometryShader
from .shader import HardNChannelFlatShader
from .voronoi import voronoi_solve
from pytorch3d.io.experimental_gltf_io import MeshGlbFormat
4,136
verts_uvs_list = mesh.textures.verts_uvs_list() # faces_list = [torch.flip(faces, [-1]) for faces in mesh.faces_list()] new_verts_list = [] for i, (verts, verts_uv) in enumerate(zip(verts_list, verts_uvs_list)): verts = verts.clone() verts_uv = verts_uv.clone() verts[...,0:2] = verts_uv[...,:] verts = (verts - 0.5) * 2 verts[...,2] *= 1 new_verts_list.append(verts) textures_uv = mesh.textures.clone() self.mesh_uv = Meshes(new_verts_list, mesh.faces_list(), textures_uv) return self.mesh_uv # Set texture for the current mesh. def set_texture_map(self, texture): new_map = texture.permute(1, 2, 0) new_map = new_map.to(self.device) new_tex = TexturesUV( [new_map], self.mesh.textures.faces_uvs_padded(), self.mesh.textures.verts_uvs_padded(), sampling_mode=self.sampling_mode ) self.mesh.textures = new_tex # Set the initial normal noise texture # No generator here for replication of the experiment result. Add one as you wish def set_noise_texture(self, channels=None): if not channels: channels = self.channels noise_texture = torch.normal(0, 1, (channels,) + self.target_size, device=self.device) self.set_texture_map(noise_texture) return noise_texture # Set the cameras given the camera poses and centers def set_cameras(self, camera_poses, centers=None, camera_distance=2.7, scale=None): elev = torch.FloatTensor([pose[0] for pose in camera_poses]) azim = torch.FloatTensor([pose[1] for pose in camera_poses]) R, T = look_at_view_transform(dist=camera_distance, elev=elev, azim=azim, at=centers or ((0,0,0),)) self.cameras = FoVOrthographicCameras(device=self.device, R=R, T=T, scale_xyz=scale or ((1,1,1),)) # Set all necessary internal data for rendering and texture baking # Can be used to refresh after changing camera positions def set_cameras_and_render_settings(self, camera_poses, centers=None, camera_distance=2.7, render_size=None, scale=None): self.set_cameras(camera_poses, centers, camera_distance, scale=scale) if render_size is None: render_size = self.render_size if not hasattr(self, "renderer"): self.setup_renderer(size=render_size) if not hasattr(self, "mesh_d"): self.disconnect_faces() if not hasattr(self, "mesh_uv"): self.construct_uv_mesh() self.calculate_tex_gradient() self.calculate_visible_triangle_mask() _,_,_,cos_maps,_, _ = self.render_geometry() self.calculate_cos_angle_weights(cos_maps) # Setup renderers for rendering # max faces per bin set to 30000 to avoid overflow in many test cases. # You can use default value to let pytorch3d handle that for you. 
def setup_renderer(self, size=64, blur=0.0, face_per_pix=1, perspective_correct=False, channels=None): if not channels: channels = self.channels self.raster_settings = RasterizationSettings( image_size=size, blur_radius=blur, faces_per_pixel=face_per_pix, perspective_correct=perspective_correct, cull_backfaces=True, max_faces_per_bin=30000, ) self.renderer = MeshRenderer( rasterizer=MeshRasterizer( cameras=self.cameras, raster_settings=self.raster_settings, ), shader=HardNChannelFlatShader( device=self.device, cameras=self.cameras, lights=self.lights, channels=channels # materials=materials ) ) # Bake screen-space cosine weights to UV space # May be able to reimplement using the generic "bake_texture" function, but it works so leave it here for now @torch.enable_grad() def calculate_cos_angle_weights(self, cos_angles, fill=True, channels=None): if not channels: channels = self.channels cos_maps = [] tmp_mesh = self.mesh.clone() for i in range(len(self.cameras)): zero_map = torch.zeros(self.target_size+(channels,), device=self.device, requires_grad=True) optimizer = torch.optim.SGD([zero_map], lr=1, momentum=0) optimizer.zero_grad() zero_tex = TexturesUV([zero_map], self.mesh.textures.faces_uvs_padded(), self.mesh.textures.verts_uvs_padded(), sampling_mode=self.sampling_mode) tmp_mesh.textures = zero_tex images_predicted = self.renderer(tmp_mesh, cameras=self.cameras[i], lights=self.lights) loss = torch.sum((cos_angles[i,:,:,0:1]**1 - images_predicted)**2) loss.backward() optimizer.step() if fill: zero_map = zero_map.detach() / (self.gradient_maps[i] + 1E-8)
# Pytorch3D based renderering functions, managed in a class # Render size is recommended to be the same as your latent view size # DO NOT USE "bilinear" sampling when you are handling latents. # Stable Diffusion has 4 latent channels so use channels=4 class UVProjection(): def __init__(self, texture_size=96, render_size=64, sampling_mode="nearest", channels=3, device=None): self.channels = channels self.device = device or torch.device("cpu") self.lights = AmbientLights(ambient_color=((1.0,)*channels,), device=self.device) self.target_size = (texture_size,texture_size) self.render_size = render_size self.sampling_mode = sampling_mode # Load obj mesh, rescale the mesh to fit into the bounding box def load_mesh(self, mesh_path, scale_factor=2.0, auto_center=True, autouv=False): mesh = load_objs_as_meshes([mesh_path], device=self.device) if auto_center: verts = mesh.verts_packed() max_bb = (verts - 0).max(0)[0] min_bb = (verts - 0).min(0)[0] scale = (max_bb - min_bb).max()/2 center = (max_bb+min_bb) /2 mesh.offset_verts_(-center) mesh.scale_verts_((scale_factor / float(scale))) else: mesh.scale_verts_((scale_factor)) if autouv or (mesh.textures is None): mesh = self.uv_unwrap(mesh) self.mesh = mesh def load_glb_mesh(self, mesh_path, scale_factor=2.0, auto_center=True, autouv=False): io = IO() io.register_meshes_format(MeshGlbFormat()) with open(mesh_path, "rb") as f: mesh = io.load_mesh(f, include_textures=True, device=self.device) if auto_center: verts = mesh.verts_packed() max_bb = (verts - 0).max(0)[0] min_bb = (verts - 0).min(0)[0] scale = (max_bb - min_bb).max()/2 center = (max_bb+min_bb) /2 mesh.offset_verts_(-center) mesh.scale_verts_((scale_factor / float(scale))) else: mesh.scale_verts_((scale_factor)) if autouv or (mesh.textures is None): mesh = self.uv_unwrap(mesh) self.mesh = mesh # Save obj mesh def save_mesh(self, mesh_path, texture): save_obj(mesh_path, self.mesh.verts_list()[0], self.mesh.faces_list()[0], verts_uvs= self.mesh.textures.verts_uvs_list()[0], faces_uvs= self.mesh.textures.faces_uvs_list()[0], texture_map=texture) # Code referred to TEXTure code (https://github.com/TEXTurePaper/TEXTurePaper.git) def uv_unwrap(self, mesh): verts_list = mesh.verts_list()[0] faces_list = mesh.faces_list()[0] v_np = verts_list.cpu().numpy() f_np = faces_list.int().cpu().numpy() atlas = xatlas.Atlas() atlas.add_mesh(v_np, f_np) chart_options = xatlas.ChartOptions() chart_options.max_iterations = 4 atlas.generate(chart_options=chart_options) vmapping, ft_np, vt_np = atlas[0] # [N], [M, 3], [N, 2] vt = torch.from_numpy(vt_np.astype(np.float32)).type(verts_list.dtype).to(mesh.device) ft = torch.from_numpy(ft_np.astype(np.int64)).type(faces_list.dtype).to(mesh.device) new_map = torch.zeros(self.target_size+(self.channels,), device=mesh.device) new_tex = TexturesUV( [new_map], [ft], [vt], sampling_mode=self.sampling_mode ) mesh.textures = new_tex return mesh ''' A functions that disconnect faces in the mesh according to its UV seams. The number of vertices are made equal to the number of unique vertices its UV layout, while the faces list is intact. 
''' def disconnect_faces(self): mesh = self.mesh verts_list = mesh.verts_list() faces_list = mesh.faces_list() verts_uvs_list = mesh.textures.verts_uvs_list() faces_uvs_list = mesh.textures.faces_uvs_list() packed_list = [v[f] for v,f in zip(verts_list, faces_list)] verts_disconnect_list = [ torch.zeros( (verts_uvs_list[i].shape[0], 3), dtype=verts_list[0].dtype, device=verts_list[0].device ) for i in range(len(verts_list))] for i in range(len(verts_list)): verts_disconnect_list[i][faces_uvs_list] = packed_list[i] assert not mesh.has_verts_normals(), "Not implemented for vertex normals" self.mesh_d = Meshes(verts_disconnect_list, faces_uvs_list, mesh.textures) return self.mesh_d ''' A function that construct a temp mesh for back-projection. Take a disconnected mesh and a rasterizer, the function calculates the projected faces as the UV, as use its original UV with pseudo z value as world space geometry. ''' def construct_uv_mesh(self): mesh = self.mesh_d verts_list = mesh.verts_list() verts_uvs_list = mesh.textures.verts_uvs_list() # faces_list = [torch.flip(faces, [-1]) for faces in mesh.faces_list()] new_verts_list = [] for i, (verts, verts_uv) in enumerate(zip(verts_list, verts_uvs_list)): verts = verts.clone() verts_uv = verts_uv.clone() verts[...,0:2] = verts_uv[...,:] verts = (verts - 0.5) * 2 verts[...,2] *= 1 new_verts_list.append(verts) textures_uv = mesh.textures.clone() self.mesh_uv = Meshes(new_verts_list, mesh.faces_list(), textures_uv) return self.mesh_uv # Set texture for the current mesh. def set_texture_map(self, texture): new_map = texture.permute(1, 2, 0) new_map = new_map.to(self.device) new_tex = TexturesUV( [new_map], self.mesh.textures.faces_uvs_padded(), self.mesh.textures.verts_uvs_padded(), sampling_mode=self.sampling_mode ) self.mesh.textures = new_tex # Set the initial normal noise texture # No generator here for replication of the experiment result. Add one as you wish def set_noise_texture(self, channels=None): if not channels: channels = self.channels noise_texture = torch.normal(0, 1, (channels,) + self.target_size, device=self.device) self.set_texture_map(noise_texture) return noise_texture # Set the cameras given the camera poses and centers def set_cameras(self, camera_poses, centers=None, camera_distance=2.7, scale=None): elev = torch.FloatTensor([pose[0] for pose in camera_poses]) azim = torch.FloatTensor([pose[1] for pose in camera_poses]) R, T = look_at_view_transform(dist=camera_distance, elev=elev, azim=azim, at=centers or ((0,0,0),)) self.cameras = FoVOrthographicCameras(device=self.device, R=R, T=T, scale_xyz=scale or ((1,1,1),)) # Set all necessary internal data for rendering and texture baking # Can be used to refresh after changing camera positions def set_cameras_and_render_settings(self, camera_poses, centers=None, camera_distance=2.7, render_size=None, scale=None): self.set_cameras(camera_poses, centers, camera_distance, scale=scale) if render_size is None: render_size = self.render_size if not hasattr(self, "renderer"): self.setup_renderer(size=render_size) if not hasattr(self, "mesh_d"): self.disconnect_faces() if not hasattr(self, "mesh_uv"): self.construct_uv_mesh() self.calculate_tex_gradient() self.calculate_visible_triangle_mask() _,_,_,cos_maps,_, _ = self.render_geometry() self.calculate_cos_angle_weights(cos_maps) # Setup renderers for rendering # max faces per bin set to 30000 to avoid overflow in many test cases. # You can use default value to let pytorch3d handle that for you. 
def setup_renderer(self, size=64, blur=0.0, face_per_pix=1, perspective_correct=False, channels=None): if not channels: channels = self.channels self.raster_settings = RasterizationSettings( image_size=size, blur_radius=blur, faces_per_pixel=face_per_pix, perspective_correct=perspective_correct, cull_backfaces=True, max_faces_per_bin=30000, ) self.renderer = MeshRenderer( rasterizer=MeshRasterizer( cameras=self.cameras, raster_settings=self.raster_settings, ), shader=HardNChannelFlatShader( device=self.device, cameras=self.cameras, lights=self.lights, channels=channels # materials=materials ) ) # Bake screen-space cosine weights to UV space # May be able to reimplement using the generic "bake_texture" function, but it works so leave it here for now @torch.enable_grad() def calculate_cos_angle_weights(self, cos_angles, fill=True, channels=None): if not channels: channels = self.channels cos_maps = [] tmp_mesh = self.mesh.clone() for i in range(len(self.cameras)): zero_map = torch.zeros(self.target_size+(channels,), device=self.device, requires_grad=True) optimizer = torch.optim.SGD([zero_map], lr=1, momentum=0) optimizer.zero_grad() zero_tex = TexturesUV([zero_map], self.mesh.textures.faces_uvs_padded(), self.mesh.textures.verts_uvs_padded(), sampling_mode=self.sampling_mode) tmp_mesh.textures = zero_tex images_predicted = self.renderer(tmp_mesh, cameras=self.cameras[i], lights=self.lights) loss = torch.sum((cos_angles[i,:,:,0:1]**1 - images_predicted)**2) loss.backward() optimizer.step() if fill: zero_map = zero_map.detach() / (self.gradient_maps[i] + 1E-8)
zero_map = voronoi_solve(zero_map, self.gradient_maps[i][...,0])
2
2023-12-09 03:27:58+00:00
8k
jinxixiang/magic_animate_unofficial
animatediff/magic_animate/unet_3d_blocks.py
[ { "identifier": "Transformer3DModel", "path": "animatediff/magic_animate/attention.py", "snippet": "class Transformer3DModel(ModelMixin, ConfigMixin):\n @register_to_config\n def __init__(\n self,\n num_attention_heads: int = 16,\n attention_head_dim: int = 88,\n in_channels: Optional[int] = None,\n num_layers: int = 1,\n dropout: float = 0.0,\n norm_num_groups: int = 32,\n cross_attention_dim: Optional[int] = None,\n attention_bias: bool = False,\n activation_fn: str = \"geglu\",\n num_embeds_ada_norm: Optional[int] = None,\n use_linear_projection: bool = False,\n only_cross_attention: bool = False,\n upcast_attention: bool = False,\n\n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n ):\n super().__init__()\n self.use_linear_projection = use_linear_projection\n self.num_attention_heads = num_attention_heads\n self.attention_head_dim = attention_head_dim\n inner_dim = num_attention_heads * attention_head_dim\n\n # Define input layers\n self.in_channels = in_channels\n\n self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)\n if use_linear_projection:\n self.proj_in = nn.Linear(in_channels, inner_dim)\n else:\n self.proj_in = nn.Conv2d(in_channels, inner_dim, kernel_size=1, stride=1, padding=0)\n\n # Define transformers blocks\n self.transformer_blocks = nn.ModuleList(\n [\n BasicTransformerBlock(\n inner_dim,\n num_attention_heads,\n attention_head_dim,\n dropout=dropout,\n cross_attention_dim=cross_attention_dim,\n activation_fn=activation_fn,\n num_embeds_ada_norm=num_embeds_ada_norm,\n attention_bias=attention_bias,\n only_cross_attention=only_cross_attention,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n )\n for d in range(num_layers)\n ]\n )\n\n # 4. 
Define output layers\n if use_linear_projection:\n self.proj_out = nn.Linear(in_channels, inner_dim)\n else:\n self.proj_out = nn.Conv2d(inner_dim, in_channels, kernel_size=1, stride=1, padding=0)\n\n def forward(self, hidden_states, encoder_hidden_states=None, timestep=None, return_dict: bool = True):\n # Input\n assert hidden_states.dim() == 5, f\"Expected hidden_states to have ndim=5, but got ndim={hidden_states.dim()}.\"\n video_length = hidden_states.shape[2]\n hidden_states = rearrange(hidden_states, \"b c f h w -> (b f) c h w\")\n # JH: need not repeat when a list of prompts are given \n if encoder_hidden_states.shape[0] != hidden_states.shape[0]:\n encoder_hidden_states = repeat(encoder_hidden_states, 'b n c -> (b f) n c', f=video_length)\n\n batch, channel, height, weight = hidden_states.shape\n residual = hidden_states\n\n hidden_states = self.norm(hidden_states)\n if not self.use_linear_projection:\n hidden_states = self.proj_in(hidden_states)\n inner_dim = hidden_states.shape[1]\n hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * weight, inner_dim)\n else:\n inner_dim = hidden_states.shape[1]\n hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * weight, inner_dim)\n hidden_states = self.proj_in(hidden_states)\n\n # Blocks\n for block in self.transformer_blocks:\n hidden_states = block(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n timestep=timestep,\n video_length=video_length\n )\n\n # Output\n if not self.use_linear_projection:\n hidden_states = (\n hidden_states.reshape(batch, height, weight, inner_dim).permute(0, 3, 1, 2).contiguous()\n )\n hidden_states = self.proj_out(hidden_states)\n else:\n hidden_states = self.proj_out(hidden_states)\n hidden_states = (\n hidden_states.reshape(batch, height, weight, inner_dim).permute(0, 3, 1, 2).contiguous()\n )\n\n output = hidden_states + residual\n\n output = rearrange(output, \"(b f) c h w -> b c f h w\", f=video_length)\n if not return_dict:\n return (output,)\n\n return Transformer3DModelOutput(sample=output)" }, { "identifier": "Downsample3D", "path": "animatediff/magic_animate/resnet.py", "snippet": "class Downsample3D(nn.Module):\n def __init__(self, channels, use_conv=False, out_channels=None, padding=1, name=\"conv\"):\n super().__init__()\n self.channels = channels\n self.out_channels = out_channels or channels\n self.use_conv = use_conv\n self.padding = padding\n stride = 2\n self.name = name\n\n if use_conv:\n self.conv = InflatedConv3d(self.channels, self.out_channels, 3, stride=stride, padding=padding)\n else:\n raise NotImplementedError\n\n def forward(self, hidden_states):\n assert hidden_states.shape[1] == self.channels\n if self.use_conv and self.padding == 0:\n raise NotImplementedError\n\n assert hidden_states.shape[1] == self.channels\n hidden_states = self.conv(hidden_states)\n\n return hidden_states" }, { "identifier": "ResnetBlock3D", "path": "animatediff/magic_animate/resnet.py", "snippet": "class ResnetBlock3D(nn.Module):\n def __init__(\n self,\n *,\n in_channels,\n out_channels=None,\n conv_shortcut=False,\n dropout=0.0,\n temb_channels=512,\n groups=32,\n groups_out=None,\n pre_norm=True,\n eps=1e-6,\n non_linearity=\"swish\",\n time_embedding_norm=\"default\",\n output_scale_factor=1.0,\n use_in_shortcut=None,\n ):\n super().__init__()\n self.pre_norm = pre_norm\n self.pre_norm = True\n self.in_channels = in_channels\n out_channels = in_channels if out_channels is None else out_channels\n self.out_channels = out_channels\n 
self.use_conv_shortcut = conv_shortcut\n self.time_embedding_norm = time_embedding_norm\n self.output_scale_factor = output_scale_factor\n\n if groups_out is None:\n groups_out = groups\n\n self.norm1 = torch.nn.GroupNorm(num_groups=groups, num_channels=in_channels, eps=eps, affine=True)\n\n self.conv1 = InflatedConv3d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)\n\n if temb_channels is not None:\n if self.time_embedding_norm == \"default\":\n time_emb_proj_out_channels = out_channels\n elif self.time_embedding_norm == \"scale_shift\":\n time_emb_proj_out_channels = out_channels * 2\n else:\n raise ValueError(f\"unknown time_embedding_norm : {self.time_embedding_norm} \")\n\n self.time_emb_proj = torch.nn.Linear(temb_channels, time_emb_proj_out_channels)\n else:\n self.time_emb_proj = None\n\n self.norm2 = torch.nn.GroupNorm(num_groups=groups_out, num_channels=out_channels, eps=eps, affine=True)\n self.dropout = torch.nn.Dropout(dropout)\n self.conv2 = InflatedConv3d(out_channels, out_channels, kernel_size=3, stride=1, padding=1)\n\n if non_linearity == \"swish\":\n self.nonlinearity = lambda x: F.silu(x)\n elif non_linearity == \"mish\":\n self.nonlinearity = Mish()\n elif non_linearity == \"silu\":\n self.nonlinearity = nn.SiLU()\n\n self.use_in_shortcut = self.in_channels != self.out_channels if use_in_shortcut is None else use_in_shortcut\n\n self.conv_shortcut = None\n if self.use_in_shortcut:\n self.conv_shortcut = InflatedConv3d(in_channels, out_channels, kernel_size=1, stride=1, padding=0)\n\n def forward(self, input_tensor, temb):\n hidden_states = input_tensor\n\n hidden_states = self.norm1(hidden_states)\n hidden_states = self.nonlinearity(hidden_states)\n\n hidden_states = self.conv1(hidden_states)\n\n if temb is not None:\n temb = self.time_emb_proj(self.nonlinearity(temb))[:, :, None, None, None]\n\n if temb is not None and self.time_embedding_norm == \"default\":\n hidden_states = hidden_states + temb\n\n hidden_states = self.norm2(hidden_states)\n\n if temb is not None and self.time_embedding_norm == \"scale_shift\":\n scale, shift = torch.chunk(temb, 2, dim=1)\n hidden_states = hidden_states * (1 + scale) + shift\n\n hidden_states = self.nonlinearity(hidden_states)\n\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.conv2(hidden_states)\n\n if self.conv_shortcut is not None:\n input_tensor = self.conv_shortcut(input_tensor)\n\n output_tensor = (input_tensor + hidden_states) / self.output_scale_factor\n\n return output_tensor" }, { "identifier": "Upsample3D", "path": "animatediff/magic_animate/resnet.py", "snippet": "class Upsample3D(nn.Module):\n def __init__(self, channels, use_conv=False, use_conv_transpose=False, out_channels=None, name=\"conv\"):\n super().__init__()\n self.channels = channels\n self.out_channels = out_channels or channels\n self.use_conv = use_conv\n self.use_conv_transpose = use_conv_transpose\n self.name = name\n\n conv = None\n if use_conv_transpose:\n raise NotImplementedError\n elif use_conv:\n self.conv = InflatedConv3d(self.channels, self.out_channels, 3, padding=1)\n\n def forward(self, hidden_states, output_size=None):\n assert hidden_states.shape[1] == self.channels\n\n if self.use_conv_transpose:\n raise NotImplementedError\n\n # Cast to float32 to as 'upsample_nearest2d_out_frame' op does not support bfloat16\n dtype = hidden_states.dtype\n if dtype == torch.bfloat16:\n hidden_states = hidden_states.to(torch.float32)\n\n # upsample_nearest_nhwc fails with large batch sizes. 
see https://github.com/huggingface/diffusers/issues/984\n if hidden_states.shape[0] >= 64:\n hidden_states = hidden_states.contiguous()\n\n # if `output_size` is passed we force the interpolation output\n # size and do not make use of `scale_factor=2`\n if output_size is None:\n hidden_states = F.interpolate(hidden_states, scale_factor=[1.0, 2.0, 2.0], mode=\"nearest\")\n else:\n hidden_states = F.interpolate(hidden_states, size=output_size, mode=\"nearest\")\n\n # If the input is bfloat16, we cast back to bfloat16\n if dtype == torch.bfloat16:\n hidden_states = hidden_states.to(dtype)\n\n hidden_states = self.conv(hidden_states)\n\n return hidden_states" }, { "identifier": "get_motion_module", "path": "animatediff/magic_animate/motion_module.py", "snippet": "def get_motion_module(\n in_channels,\n motion_module_type: str, \n motion_module_kwargs: dict\n):\n if motion_module_type == \"Vanilla\":\n return VanillaTemporalModule(in_channels=in_channels, **motion_module_kwargs,) \n else:\n raise ValueError" } ]
import torch
from torch import nn
from .attention import Transformer3DModel
from .resnet import Downsample3D, ResnetBlock3D, Upsample3D
from .motion_module import get_motion_module
4,512
unet_use_cross_frame_attention=unet_use_cross_frame_attention, unet_use_temporal_attention=unet_use_temporal_attention, use_motion_module=use_motion_module, motion_module_type=motion_module_type, motion_module_kwargs=motion_module_kwargs, ) raise ValueError(f"{down_block_type} does not exist.") def get_up_block( up_block_type, num_layers, in_channels, out_channels, prev_output_channel, temb_channels, add_upsample, resnet_eps, resnet_act_fn, attn_num_head_channels, resnet_groups=None, cross_attention_dim=None, dual_cross_attention=False, use_linear_projection=False, only_cross_attention=False, upcast_attention=False, resnet_time_scale_shift="default", unet_use_cross_frame_attention=None, unet_use_temporal_attention=None, use_motion_module=None, motion_module_type=None, motion_module_kwargs=None, ): up_block_type = up_block_type[7:] if up_block_type.startswith("UNetRes") else up_block_type if up_block_type == "UpBlock3D": return UpBlock3D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, resnet_time_scale_shift=resnet_time_scale_shift, use_motion_module=use_motion_module, motion_module_type=motion_module_type, motion_module_kwargs=motion_module_kwargs, ) elif up_block_type == "CrossAttnUpBlock3D": if cross_attention_dim is None: raise ValueError("cross_attention_dim must be specified for CrossAttnUpBlock3D") return CrossAttnUpBlock3D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, cross_attention_dim=cross_attention_dim, attn_num_head_channels=attn_num_head_channels, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, unet_use_cross_frame_attention=unet_use_cross_frame_attention, unet_use_temporal_attention=unet_use_temporal_attention, use_motion_module=use_motion_module, motion_module_type=motion_module_type, motion_module_kwargs=motion_module_kwargs, ) raise ValueError(f"{up_block_type} does not exist.") class UNetMidBlock3DCrossAttn(nn.Module): def __init__( self, in_channels: int, temb_channels: int, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, attn_num_head_channels=1, output_scale_factor=1.0, cross_attention_dim=1280, dual_cross_attention=False, use_linear_projection=False, upcast_attention=False, unet_use_cross_frame_attention=None, unet_use_temporal_attention=None, use_motion_module=None, motion_module_type=None, motion_module_kwargs=None, ): super().__init__() self.has_cross_attention = True self.attn_num_head_channels = attn_num_head_channels resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32) # there is always at least one resnet resnets = [
# ************************************************************************* # This file may have been modified by Bytedance Inc. (“Bytedance Inc.'s Mo- # difications”). All Bytedance Inc.'s Modifications are Copyright (2023) B- # ytedance Inc.. # ************************************************************************* # Adapted from https://github.com/guoyww/AnimateDiff # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. def get_down_block( down_block_type, num_layers, in_channels, out_channels, temb_channels, add_downsample, resnet_eps, resnet_act_fn, attn_num_head_channels, resnet_groups=None, cross_attention_dim=None, downsample_padding=None, dual_cross_attention=False, use_linear_projection=False, only_cross_attention=False, upcast_attention=False, resnet_time_scale_shift="default", unet_use_cross_frame_attention=None, unet_use_temporal_attention=None, use_motion_module=None, motion_module_type=None, motion_module_kwargs=None, ): down_block_type = down_block_type[7:] if down_block_type.startswith("UNetRes") else down_block_type if down_block_type == "DownBlock3D": return DownBlock3D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, downsample_padding=downsample_padding, resnet_time_scale_shift=resnet_time_scale_shift, use_motion_module=use_motion_module, motion_module_type=motion_module_type, motion_module_kwargs=motion_module_kwargs, ) elif down_block_type == "CrossAttnDownBlock3D": if cross_attention_dim is None: raise ValueError("cross_attention_dim must be specified for CrossAttnDownBlock3D") return CrossAttnDownBlock3D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, downsample_padding=downsample_padding, cross_attention_dim=cross_attention_dim, attn_num_head_channels=attn_num_head_channels, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, unet_use_cross_frame_attention=unet_use_cross_frame_attention, unet_use_temporal_attention=unet_use_temporal_attention, use_motion_module=use_motion_module, motion_module_type=motion_module_type, motion_module_kwargs=motion_module_kwargs, ) raise ValueError(f"{down_block_type} does not exist.") def get_up_block( up_block_type, num_layers, in_channels, out_channels, prev_output_channel, temb_channels, add_upsample, resnet_eps, resnet_act_fn, attn_num_head_channels, resnet_groups=None, cross_attention_dim=None, dual_cross_attention=False, use_linear_projection=False, only_cross_attention=False, upcast_attention=False, resnet_time_scale_shift="default", unet_use_cross_frame_attention=None, 
unet_use_temporal_attention=None, use_motion_module=None, motion_module_type=None, motion_module_kwargs=None, ): up_block_type = up_block_type[7:] if up_block_type.startswith("UNetRes") else up_block_type if up_block_type == "UpBlock3D": return UpBlock3D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, resnet_time_scale_shift=resnet_time_scale_shift, use_motion_module=use_motion_module, motion_module_type=motion_module_type, motion_module_kwargs=motion_module_kwargs, ) elif up_block_type == "CrossAttnUpBlock3D": if cross_attention_dim is None: raise ValueError("cross_attention_dim must be specified for CrossAttnUpBlock3D") return CrossAttnUpBlock3D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, cross_attention_dim=cross_attention_dim, attn_num_head_channels=attn_num_head_channels, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, unet_use_cross_frame_attention=unet_use_cross_frame_attention, unet_use_temporal_attention=unet_use_temporal_attention, use_motion_module=use_motion_module, motion_module_type=motion_module_type, motion_module_kwargs=motion_module_kwargs, ) raise ValueError(f"{up_block_type} does not exist.") class UNetMidBlock3DCrossAttn(nn.Module): def __init__( self, in_channels: int, temb_channels: int, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, attn_num_head_channels=1, output_scale_factor=1.0, cross_attention_dim=1280, dual_cross_attention=False, use_linear_projection=False, upcast_attention=False, unet_use_cross_frame_attention=None, unet_use_temporal_attention=None, use_motion_module=None, motion_module_type=None, motion_module_kwargs=None, ): super().__init__() self.has_cross_attention = True self.attn_num_head_channels = attn_num_head_channels resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32) # there is always at least one resnet resnets = [
ResnetBlock3D(
2
2023-12-12 00:16:39+00:00
8k
Chat-3D/Chat-3D-v2
models/chat3d.py
[ { "identifier": "LlamaForCausalLM", "path": "models/modeling_llama.py", "snippet": "class LlamaForCausalLM(LlamaPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n self.model = LlamaModel(config)\n\n self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def get_input_embeddings(self):\n return self.model.embed_tokens\n\n def set_input_embeddings(self, value):\n self.model.embed_tokens = value\n\n def get_output_embeddings(self):\n return self.lm_head\n\n def set_output_embeddings(self, new_embeddings):\n self.lm_head = new_embeddings\n\n def set_decoder(self, decoder):\n self.model = decoder\n\n def get_decoder(self):\n return self.model\n\n @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)\n @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids: torch.LongTensor = None,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n past_key_values: Optional[List[torch.FloatTensor]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n query_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, CausalLMOutputWithPast]:\n r\"\"\"\n Args:\n labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,\n config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored\n (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.\n\n Returns:\n\n Example:\n\n ```python\n >>> from transformers import AutoTokenizer, LlamaForCausalLM\n\n >>> model = LlamaForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)\n >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)\n\n >>> prompt = \"Hey, are you consciours? Can you talk to me?\"\n >>> inputs = tokenizer(prompt, return_tensors=\"pt\")\n\n >>> # Generate\n >>> generate_ids = model.generate(inputs.input_ids, max_length=30)\n >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]\n \"Hey, are you consciours? 
Can you talk to me?\\nI'm not consciours, but I can talk to you.\"\n ```\"\"\"\n\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)\n outputs = self.model(\n input_ids=input_ids,\n attention_mask=attention_mask,\n position_ids=position_ids,\n past_key_values=past_key_values,\n inputs_embeds=inputs_embeds,\n query_embeds=query_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n hidden_states = outputs[0]\n\n if self.lm_head.weight.dtype == torch.float32:\n hidden_states = hidden_states.float()\n logits = self.lm_head(hidden_states)\n\n loss = None\n if labels is not None:\n # Shift so that tokens < n predict n\n shift_logits = logits[..., :-1, :].contiguous()\n shift_labels = labels[..., 1:].contiguous()\n # Flatten the tokens\n loss_fct = CrossEntropyLoss()\n shift_logits = shift_logits.view(-1, self.config.vocab_size)\n shift_labels = shift_labels.view(-1)\n # Enable model parallelism\n shift_labels = shift_labels.to(shift_logits.device)\n loss = loss_fct(shift_logits, shift_labels)\n\n if not return_dict:\n output = (logits,) + outputs[1:]\n return (loss,) + output if loss is not None else output\n\n return CausalLMOutputWithPast(\n loss=loss,\n logits=logits,\n past_key_values=outputs.past_key_values,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n def prepare_inputs_for_generation(\n self, input_ids, query_embeds=None, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs\n ):\n if past_key_values:\n input_ids = input_ids[:, -1:]\n\n position_ids = kwargs.get(\"position_ids\", None)\n if attention_mask is not None and position_ids is None:\n # create position_ids on the fly for batch generation\n position_ids = attention_mask.long().cumsum(-1) - 1\n position_ids.masked_fill_(attention_mask == 0, 1)\n if past_key_values:\n position_ids = position_ids[:, -1].unsqueeze(-1)\n query_embeds = None\n\n # if `inputs_embeds` are passed, we only want to use them in the 1st generation step\n if inputs_embeds is not None and past_key_values is None:\n model_inputs = {\"inputs_embeds\": inputs_embeds}\n else:\n model_inputs = {\"input_ids\": input_ids}\n\n model_inputs.update(\n {\n \"position_ids\": position_ids,\n \"query_embeds\": query_embeds,\n \"past_key_values\": past_key_values,\n \"use_cache\": kwargs.get(\"use_cache\"),\n \"attention_mask\": attention_mask,\n }\n )\n return model_inputs\n\n @staticmethod\n def _reorder_cache(past_key_values, beam_idx):\n reordered_past = ()\n for layer_past in past_key_values:\n reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)\n return reordered_past" }, { "identifier": "TransformerEncoder", "path": "models/transformer_vanilla/transformer_block.py", "snippet": "class TransformerEncoder(nn.Module):\n def __init__(self, dim, num_layers=1, heads=32, dim_head=None, dropout=0.1):\n super().__init__()\n self.block_list = [BasicTransformerBlock(dim, heads, dim_head, dropout) for _ in range(num_layers)]\n self.layers = nn.ModuleList(self.block_list)\n self.output_norm = nn.LayerNorm(dim)\n self.apply(self._init_weights)\n\n 
def forward(self, x, mask=None, dist_attn=None):\n for layer in self.layers:\n x = layer(x, mask=mask, dist_attn=dist_attn)\n # x = self.output_norm(x)\n return x\n\n def _init_weights(self, module):\n if isinstance(module, nn.Linear):\n module.weight.data.normal_(mean=0.0, std=0.02)\n if module.bias is not None:\n module.bias.data.zero_()" }, { "identifier": "CMT", "path": "models/transformer_vanilla/transformer_block.py", "snippet": "class CMT(nn.Module):\n\n def __init__(self, hidden_size, num_layers=1):\n super().__init__()\n\n decoder_layer = TransformerSpatialDecoderLayer(\n d_model=hidden_size, nhead=8, dim_head=64,\n dim_feedforward=4096, dropout=0.1\n )\n self.layers = _get_clones(decoder_layer, num_layers)\n\n loc_layer = nn.Sequential(\n nn.Linear(6, hidden_size),\n nn.ReLU(),\n nn.LayerNorm(hidden_size)\n )\n self.loc_layers = _get_clones(loc_layer, 1)\n\n self.apply(self._init_weights)\n\n def _init_weights(self, module):\n \"\"\"Initialize the weights\"\"\"\n if isinstance(module, nn.Linear):\n # Slightly different from the TF version which uses truncated_normal for initialization\n # cf https://github.com/pytorch/pytorch/pull/5617\n module.weight.data.normal_(mean=0.0, std=0.01)\n if module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.Embedding):\n module.weight.data.normal_(mean=0.0, std=0.02)\n if module.padding_idx is not None:\n module.weight.data[module.padding_idx].zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n\n def calc_pairwise_locs(self, obj_centers, eps=1e-10, pairwise_rel_type='center'):\n pairwise_locs = einops.repeat(obj_centers, 'b l d -> b l 1 d') \\\n - einops.repeat(obj_centers, 'b l d -> b 1 l d')\n pairwise_dists = torch.sqrt(torch.sum(pairwise_locs ** 2, 3) + eps) # (b, l, l)\n\n # max_dists = torch.max(pairwise_dists.view(pairwise_dists.size(0), -1), dim=1)[0]\n norm_pairwise_dists = pairwise_dists #/ einops.repeat(max_dists, 'b -> b 1 1')\n\n pairwise_dists_2d = torch.sqrt(torch.sum(pairwise_locs[..., :2] ** 2, 3) + eps)\n pairwise_locs = torch.stack(\n [norm_pairwise_dists, pairwise_locs[..., 2] / pairwise_dists,\n pairwise_dists_2d / pairwise_dists, pairwise_locs[..., 1] / pairwise_dists_2d,\n pairwise_locs[..., 0] / pairwise_dists_2d],\n dim=3\n )\n return pairwise_locs\n\n def forward(\n self, obj_embeds, obj_locs, obj_masks\n ):\n pairwise_locs = self.calc_pairwise_locs(\n obj_locs[:, :, :3]\n )\n\n out_embeds = obj_embeds\n for i, layer in enumerate(self.layers):\n # query_pos = self.loc_layers[0](obj_locs)\n # out_embeds = out_embeds + query_pos\n out_embeds = layer(\n out_embeds, pairwise_locs,\n tgt_key_padding_mask=obj_masks.logical_not(),\n )\n\n return out_embeds" }, { "identifier": "GenericMLP", "path": "models/helpers.py", "snippet": "class GenericMLP(nn.Module):\n def __init__(\n self,\n input_dim,\n hidden_dims,\n output_dim,\n norm_fn_name=None,\n activation=\"silu\",\n use_conv=False,\n dropout=None,\n hidden_use_bias=False,\n output_use_bias=True,\n output_use_activation=False,\n output_use_norm=False,\n weight_init_name=None,\n weight_init_std=0.02\n ):\n super().__init__()\n activation = ACTIVATION_DICT[activation]\n norm = None\n if norm_fn_name is not None:\n norm = NORM_DICT[norm_fn_name]\n if norm_fn_name == \"ln\" and use_conv:\n norm = lambda x: nn.GroupNorm(1, x) # easier way to use LayerNorm\n\n if dropout is not None:\n if not isinstance(dropout, list):\n dropout = [dropout for _ in range(len(hidden_dims))]\n\n layers = []\n 
prev_dim = input_dim\n for idx, x in enumerate(hidden_dims):\n if use_conv:\n layer = nn.Conv1d(prev_dim, x, 1, bias=hidden_use_bias)\n else:\n layer = nn.Linear(prev_dim, x, bias=hidden_use_bias)\n layers.append(layer)\n if norm:\n layers.append(norm(x))\n layers.append(activation())\n if dropout is not None:\n layers.append(nn.Dropout(p=dropout[idx]))\n prev_dim = x\n if use_conv:\n layer = nn.Conv1d(prev_dim, output_dim, 1, bias=output_use_bias)\n else:\n layer = nn.Linear(prev_dim, output_dim, bias=output_use_bias)\n layers.append(layer)\n\n if output_use_norm:\n layers.append(norm(output_dim))\n\n if output_use_activation:\n layers.append(activation())\n\n self.layers = nn.Sequential(*layers)\n # self.weight_init_std = weight_init_std\n # self.apply(self._init_weights)\n\n def _init_weights(self, module):\n std = self.weight_init_std\n if isinstance(module, nn.Linear):\n module.weight.data.normal_(mean=0.0, std=std)\n if module.bias is not None:\n module.bias.data.zero_()\n\n # if weight_init_name is not None:\n # self.do_weight_init(weight_init_name)\n #\n # def do_weight_init(self, weight_init_name):\n # func = WEIGHT_INIT_DICT[weight_init_name]\n # for (_, param) in self.named_parameters():\n # if param.dim() > 1: # skips batchnorm/layernorm\n # func(param)\n\n def forward(self, x):\n output = self.layers(x)\n return output" }, { "identifier": "PositionEmbeddingCoordsSine", "path": "models/position_embedding.py", "snippet": "class PositionEmbeddingCoordsSine(nn.Module):\n def __init__(\n self,\n temperature=10000,\n normalize=False,\n scale=None,\n pos_type=\"fourier\",\n d_pos=None,\n d_in=3,\n gauss_scale=1.0,\n ):\n super().__init__()\n self.temperature = temperature\n self.normalize = normalize\n if scale is not None and normalize is False:\n raise ValueError(\"normalize should be True if scale is passed\")\n if scale is None:\n scale = 2 * math.pi\n assert pos_type in [\"sine\", \"fourier\"]\n self.pos_type = pos_type\n self.scale = scale\n if pos_type == \"fourier\":\n assert d_pos is not None\n assert d_pos % 2 == 0\n # define a gaussian matrix input_ch -> output_ch\n B = torch.empty((d_in, d_pos // 2)).normal_()\n B *= gauss_scale\n self.register_buffer(\"gauss_B\", B)\n self.d_pos = d_pos\n\n def get_sine_embeddings(self, xyz, num_channels, input_range):\n # clone coords so that shift/scale operations do not affect original tensor\n orig_xyz = xyz\n xyz = orig_xyz.clone()\n\n ncoords = xyz.shape[1]\n # if self.normalize:\n # xyz = shift_scale_points(xyz, src_range=input_range)\n\n ndim = num_channels // xyz.shape[2]\n if ndim % 2 != 0:\n ndim -= 1\n # automatically handle remainder by assiging it to the first dim\n rems = num_channels - (ndim * xyz.shape[2])\n\n assert (\n ndim % 2 == 0\n ), f\"Cannot handle odd sized ndim={ndim} where num_channels={num_channels} and xyz={xyz.shape}\"\n\n final_embeds = []\n prev_dim = 0\n\n for d in range(xyz.shape[2]):\n cdim = ndim\n if rems > 0:\n # add remainder in increments of two to maintain even size\n cdim += 2\n rems -= 2\n\n if cdim != prev_dim:\n dim_t = torch.arange(cdim, dtype=torch.float32, device=xyz.device)\n dim_t = self.temperature ** (2 * (dim_t // 2) / cdim)\n\n # create batch x cdim x nccords embedding\n raw_pos = xyz[:, :, d]\n if self.scale:\n raw_pos *= self.scale\n pos = raw_pos[:, :, None] / dim_t\n pos = torch.stack(\n (pos[:, :, 0::2].sin(), pos[:, :, 1::2].cos()), dim=3\n ).flatten(2)\n final_embeds.append(pos)\n prev_dim = cdim\n\n final_embeds = torch.cat(final_embeds, dim=2).permute(0, 2, 1)\n return 
final_embeds\n\n def get_fourier_embeddings(self, xyz, num_channels=None, input_range=None):\n # Follows - https://people.eecs.berkeley.edu/~bmild/fourfeat/index.html\n\n if num_channels is None:\n num_channels = self.gauss_B.shape[1] * 2\n\n bsize, npoints = xyz.shape[0], xyz.shape[1]\n assert num_channels > 0 and num_channels % 2 == 0\n d_in, max_d_out = self.gauss_B.shape[0], self.gauss_B.shape[1]\n d_out = num_channels // 2\n assert d_out <= max_d_out\n assert d_in == xyz.shape[-1]\n\n # clone coords so that shift/scale operations do not affect original tensor\n orig_xyz = xyz\n xyz = orig_xyz.clone()\n\n ncoords = xyz.shape[1]\n # if self.normalize:\n # xyz = shift_scale_points(xyz, src_range=input_range)\n\n xyz *= 2 * np.pi\n xyz_proj = torch.mm(xyz.view(-1, d_in), self.gauss_B[:, :d_out]).view(\n bsize, npoints, d_out\n )\n final_embeds = [xyz_proj.sin(), xyz_proj.cos()]\n\n # return batch x d_pos x npoints embedding\n final_embeds = torch.cat(final_embeds, dim=2).permute(0, 2, 1)\n return final_embeds\n\n def forward(self, xyz, num_channels=None, input_range=None):\n assert isinstance(xyz, torch.Tensor)\n assert xyz.ndim == 3\n # xyz is batch x npoints x 3\n if self.pos_type == \"sine\":\n with torch.no_grad():\n return self.get_sine_embeddings(xyz, num_channels, input_range)\n elif self.pos_type == \"fourier\":\n with torch.no_grad():\n return self.get_fourier_embeddings(xyz, num_channels, input_range)\n else:\n raise ValueError(f\"Unknown {self.pos_type}\")\n\n def extra_repr(self):\n st = f\"type={self.pos_type}, scale={self.scale}, normalize={self.normalize}\"\n if hasattr(self, \"gauss_B\"):\n st += (\n f\", gaussB={self.gauss_B.shape}, gaussBsum={self.gauss_B.sum().item()}\"\n )\n return st" }, { "identifier": "PositionalEmbedding", "path": "models/position_embedding.py", "snippet": "class PositionalEmbedding(nn.Module):\n def __init__(self, sigma=1, dim=4096):\n super().__init__()\n self.sigma = sigma\n self.dim = dim // 2\n self.w = torch.randn((self.dim, 3)) * sigma\n self.w = nn.Parameter(self.w, requires_grad=True)\n\n def forward(self, x):\n bs, obj_num, _ = x.shape\n x = x.reshape(-1, 3)\n v = torch.cat([torch.sin(self.w.detach() @ x.T), torch.cos(self.w.detach() @ x.T)])\n v = v.T.reshape(bs, obj_num, -1)\n v_norm = v / v.norm(dim=-1).unsqueeze(-1)\n return v_norm" } ]
import random import logging import torch import torch.nn as nn import torch.nn.functional as F import contextlib from abc import ABC from torch.cuda.amp import autocast as autocast from .modeling_llama import LlamaForCausalLM from transformers import LlamaTokenizer, LlamaConfig from models.transformer_vanilla import TransformerEncoder, CMT from models.helpers import GenericMLP from models.position_embedding import PositionEmbeddingCoordsSine, PositionalEmbedding from transformers import StoppingCriteria, StoppingCriteriaList
6,010
module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) return _init_weights class CustomGradLayer(torch.autograd.Function): @staticmethod def forward(ctx, input, coefficient=1.0): ctx.coefficient = coefficient return input @staticmethod def backward(ctx, grad_output): grad_input = grad_output * ctx.coefficient return grad_input, None class Chat3D(nn.Module): """ VideoChat model. """ def __init__(self, config): super().__init__() llama_model_path = config.get("llama_model_path") low_resource = config.get("low_resource", False) # prompt self.prompt_template = config.get("prompt_template", "") self.max_txt_len = config.get("max_txt_len", 32) self.end_sym = config.get("end_sym", '\n') self.system_path = config.get("system_path", "") self.begin_signal = "###" self.role = ("Human", "Assistant") self.pc_start_token, self.pc_end_token = "<Target>", "</Target>" self.scene_start_token, self.scene_end_token = "<Scene>", "</Scene>" self.add_scene_token = config.get("add_scene_token", True) self.debug = config.get("debug", False) self.obj_norm_scale = config.get("obj_norm_scale", 1) self.scene_norm_scale = config.get("scene_norm_scale", 1) self.grad_scale = config.get("grad_scale", 1) mlp_dropout = config.get("mlp_dropout", 0.5) self.stage = config.get("stage", 1) self.low_resource = low_resource self.input_dim = config.get("input_dim", 512) self.attr_dim = config.get("attr_dim", 512) self.inter_dim = self.input_dim + self.attr_dim * 2 if not self.debug: logger.info('Loading LLAMA') self.llama_tokenizer = LlamaTokenizer.from_pretrained(llama_model_path, use_fast=False) self.llama_tokenizer.pad_token = self.llama_tokenizer.eos_token if self.low_resource: self.llama_model = LlamaForCausalLM.from_pretrained( llama_model_path, torch_dtype=torch.float16, load_in_8bit=True, device_map="auto" ) else: self.llama_model = LlamaForCausalLM.from_pretrained( llama_model_path, torch_dtype=torch.float16, ) logger.info("freeze LLAMA") for name, param in self.llama_model.named_parameters(): param.requires_grad = False # if self.stage != 1: # for layer_ind in range(30, 32): # for param in self.llama_model.model.layers[layer_ind].parameters(): # param.requires_grad = True # param.data = param.data.float() self.llama_dim = self.llama_model.config.hidden_size logger.info('Loading LLAMA Done') else: self.llama_model = None self.llama_dim = 4096 # self.object_input_proj = nn.Sequential( # nn.Linear(self.input_dim, self.input_dim), # # nn.ReLU(), # # nn.LayerNorm(self.input_dim), # ) self.coord_proj = nn.Sequential( nn.Linear(3, self.attr_dim), # nn.ReLU(), # nn.LayerNorm(self.attr_dim), # nn.Dropout(mlp_dropout) ) self.color_proj = nn.Sequential( nn.Linear(3, self.attr_dim), # nn.ReLU(), # nn.LayerNorm(self.attr_dim), # nn.Dropout(mlp_dropout) ) # self.color_dropout = nn.Dropout(mlp_dropout) # self.pos_proj = nn.Sequential( # nn.Linear(6, self.inter_dim), # nn.LayerNorm(self.inter_dim) # ) # self.pos_embedding = PositionalEmbedding(dim=self.llama_dim) self.pos_proj = nn.Sequential( nn.Linear(3, self.llama_dim) ) self.object_proj = nn.Sequential( nn.Linear(self.inter_dim, self.llama_dim), nn.GELU(), nn.Dropout(mlp_dropout), nn.LayerNorm(self.llama_dim), nn.Linear(self.llama_dim, self.llama_dim) ) self.scene_proj = nn.Sequential( nn.Linear(self.llama_dim, self.llama_dim), ) self.encoder_num_layers = int(config.get("encoder_num_layers", 1))
logger = logging.getLogger(__name__) class StoppingCriteriaSub(StoppingCriteria): def __init__(self, stops=[], encounters=1): super().__init__() self.stops = stops def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor): for stop in self.stops: if torch.all((stop == input_ids[0][-len(stop):])).item(): return True return False def init_weights(std=0.02): def _init_weights(module): """Initialize the weights""" if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) return _init_weights class CustomGradLayer(torch.autograd.Function): @staticmethod def forward(ctx, input, coefficient=1.0): ctx.coefficient = coefficient return input @staticmethod def backward(ctx, grad_output): grad_input = grad_output * ctx.coefficient return grad_input, None class Chat3D(nn.Module): """ VideoChat model. """ def __init__(self, config): super().__init__() llama_model_path = config.get("llama_model_path") low_resource = config.get("low_resource", False) # prompt self.prompt_template = config.get("prompt_template", "") self.max_txt_len = config.get("max_txt_len", 32) self.end_sym = config.get("end_sym", '\n') self.system_path = config.get("system_path", "") self.begin_signal = "###" self.role = ("Human", "Assistant") self.pc_start_token, self.pc_end_token = "<Target>", "</Target>" self.scene_start_token, self.scene_end_token = "<Scene>", "</Scene>" self.add_scene_token = config.get("add_scene_token", True) self.debug = config.get("debug", False) self.obj_norm_scale = config.get("obj_norm_scale", 1) self.scene_norm_scale = config.get("scene_norm_scale", 1) self.grad_scale = config.get("grad_scale", 1) mlp_dropout = config.get("mlp_dropout", 0.5) self.stage = config.get("stage", 1) self.low_resource = low_resource self.input_dim = config.get("input_dim", 512) self.attr_dim = config.get("attr_dim", 512) self.inter_dim = self.input_dim + self.attr_dim * 2 if not self.debug: logger.info('Loading LLAMA') self.llama_tokenizer = LlamaTokenizer.from_pretrained(llama_model_path, use_fast=False) self.llama_tokenizer.pad_token = self.llama_tokenizer.eos_token if self.low_resource: self.llama_model = LlamaForCausalLM.from_pretrained( llama_model_path, torch_dtype=torch.float16, load_in_8bit=True, device_map="auto" ) else: self.llama_model = LlamaForCausalLM.from_pretrained( llama_model_path, torch_dtype=torch.float16, ) logger.info("freeze LLAMA") for name, param in self.llama_model.named_parameters(): param.requires_grad = False # if self.stage != 1: # for layer_ind in range(30, 32): # for param in self.llama_model.model.layers[layer_ind].parameters(): # param.requires_grad = True # param.data = param.data.float() self.llama_dim = self.llama_model.config.hidden_size logger.info('Loading LLAMA Done') else: self.llama_model = None self.llama_dim = 4096 # self.object_input_proj = nn.Sequential( # nn.Linear(self.input_dim, self.input_dim), # # nn.ReLU(), # # nn.LayerNorm(self.input_dim), # ) self.coord_proj = nn.Sequential( nn.Linear(3, self.attr_dim), # nn.ReLU(), # nn.LayerNorm(self.attr_dim), # nn.Dropout(mlp_dropout) ) self.color_proj = nn.Sequential( nn.Linear(3, self.attr_dim), # nn.ReLU(), # nn.LayerNorm(self.attr_dim), # nn.Dropout(mlp_dropout) ) # self.color_dropout = nn.Dropout(mlp_dropout) # self.pos_proj = nn.Sequential( # nn.Linear(6, self.inter_dim), # nn.LayerNorm(self.inter_dim) # ) # self.pos_embedding = 
PositionalEmbedding(dim=self.llama_dim) self.pos_proj = nn.Sequential( nn.Linear(3, self.llama_dim) ) self.object_proj = nn.Sequential( nn.Linear(self.inter_dim, self.llama_dim), nn.GELU(), nn.Dropout(mlp_dropout), nn.LayerNorm(self.llama_dim), nn.Linear(self.llama_dim, self.llama_dim) ) self.scene_proj = nn.Sequential( nn.Linear(self.llama_dim, self.llama_dim), ) self.encoder_num_layers = int(config.get("encoder_num_layers", 1))
self.relation_module = CMT(hidden_size=self.llama_dim, num_layers=self.encoder_num_layers)
2
2023-12-11 14:39:58+00:00
8k
SqueezeBits/owlite
owlite/nn/modules/qmodule_mixins.py
[ { "identifier": "log", "path": "owlite/logger.py", "snippet": "class Logger(logging.Logger):\n class _WarningFilterContext:\n class WarningFilter(logging.Filter):\n ENV_VAR = \"OWLITE_LOG_LEVEL\"\n DEBUG_WARNING = 15\n ULTRA_VERBOSE = -10\n def ignore_warnings(self):\n def __init__(self, logger) -> None:\n def __enter__(self):\n def filter(self, record):\n def __exit__(self, exc_type, exc_val, exc_tb):\n def debug_warning(self, msg, *args, **kwargs):\n def level(self) -> int:\n def level(self, value):\ndef suppress_owlite_warnings(cls):\n def new_init(self, *args, **kwargs):" }, { "identifier": "FakeQuantizer", "path": "owlite/nn/fake_quantizer.py", "snippet": "class FakeQuantizer(torch.nn.Module):\n \"\"\"An implementation of fake quantization (a.k.a. quantization simulation)\n\n Attributes:\n step_size (torch.Tensor): The quantization scale, determining the magnitude of each quantization interval.\n zero_point (torch.Tensor): The quantization zero_point. It may be expressed as a float in the context\n of asymmetric quantization, while for symmetric quantization, it is fixed at zero tensor.\n precision (torch.IntTensor): The number of bits used for quantization.\n symmetric (torch.BoolTensor): Whether symmetric quantization is applied.\n unsigned (torch.BoolTensor): Whether unsigned quantization is applied\n per_channel (torch.BoolTensor): Whether per-channel quantization or per-tensor quantization is applied\n learn_zero_point (torch.BoolTensor): whether the zero point is learnable.\n grad_scale (torch.FloatTensor): The gradient scaling factor of quantization parameters.\n _narrow_range (torch.BoolTensor): Whether a narrow range is used in quantization.\n \"\"\"\n\n precision: torch.IntTensor\n symmetric: torch.BoolTensor\n unsigned: torch.BoolTensor\n per_channel: torch.BoolTensor\n learn_zero_point: torch.BoolTensor\n grad_scale: torch.FloatTensor\n _narrow_range: torch.BoolTensor\n\n @classmethod\n def create(\n cls,\n options: Optional[FakeQuantizerOptions],\n channel_size: Optional[int] = None,\n enable: bool = True,\n narrow_range: bool = False,\n ) -> Optional[\"FakeQuantizer\"]:\n \"\"\"Creates a `FakeQuantizer` instance if options is not `None`, otherwise returns `None`\n\n Args:\n options (Optional[FakeQuantizerOptions]): Options for fake quantizer to return. If `None`,\n dose notcreate fake quantizer.\n channel_size (Optional[int], optional): Channel size of per-channel quantization. Not used in\n per-tensor quantization. If `None`, no channel size is set. Defaults to `None`.\n enable (bool, optional): If true, returns the enabled quantzier. If false, returns the quantizer\n that was disabled. Defaults to `True`\n narrow_range (bool, optional): If true, returns the quantzier with a narrow range. If false, it\n does not have a narrow range. Defaults to `False`\n\n Returns:\n Optional[FakeQuantizer]: If the `options` is valid for quantization returns created fake quantizer.\n Otherwise return `None`.\n \"\"\"\n if options is None or options.precision > 8:\n return None\n return FakeQuantizer(options, channel_size, enable, narrow_range)\n\n def __init__(\n self,\n options: FakeQuantizerOptions,\n channel_size: Optional[int] = None,\n enable: bool = True,\n narrow_range: bool = False,\n ):\n \"\"\"Initializes a FakeQuantizer instance.\n\n Args:\n options (QuantizerOptions): options\n channel_size (Optional[int], optional): The channel size for per-channel quantization. 
Defaults to None.\n This value is required only when `options.per_channel` is `True`, otherwise has no effect.\n It can be set after the instantiation of the object, must be set before calling its `forward` method.\n enable (bool, optional): whether to enable this quantizer object as soon as it is initialized.\n Defaults to True.\n narrow_range (bool, optional): Use symmetric integer range for signed quantization\n eg) [-127,127] instead of [-128,127] for num_bits=8. Default False.\n\n Raises:\n ValueError: if `options.ptq_calibration` is \"percentile\" but `options.percentile` is `None`.\n \"\"\"\n super().__init__()\n self.register_buffer(\"precision\", torch.tensor(options.precision))\n self.register_buffer(\"symmetric\", torch.tensor(options.symmetric))\n self.register_buffer(\"unsigned\", torch.tensor(options.unsigned))\n self.register_buffer(\"per_channel\", torch.tensor(options.per_channel))\n if not self.symmetric.item() and self.per_channel.item():\n raise RuntimeError(\"asymmetric per_channel quantization is not available\")\n self.register_buffer(\"learn_zero_point\", torch.tensor(options.learn_zero_point))\n self.register_buffer(\"grad_scale\", torch.tensor(options.grad_scale))\n if narrow_range and not (self.symmetric.item() and not self.unsigned.item()):\n log.warning(\n \"narrow_range should only be used with symmetric signed quantization.\\n\"\n \"(narrow_range, symmetric, unsigned) = \"\n f\"({narrow_range}, {self.symmetric.item()}, {self.unsigned.item()})\"\n )\n self.register_buffer(\"_narrow_range\", torch.tensor(narrow_range))\n\n if self.per_channel:\n if channel_size is not None:\n self.channel_size = channel_size\n else:\n self.step_size = torch.nn.Parameter(torch.ones(1))\n self.zero_point = torch.nn.Parameter(\n torch.zeros(1),\n requires_grad=bool(not self.symmetric.item() and self.learn_zero_point.item()),\n )\n self._is_enabled = enable\n self.is_zero_point_folded = False\n self.qat_backward_type = options.qat_backward\n self.ptq_calibration = options.ptq_calibration\n calibrator_class = options.ptq_calibration.calibrator_class\n if options.ptq_calibration == PTQCalibrationType.percentile:\n if options.percentile is None:\n raise ValueError(\"percentile value is required for percentile PTQ calibrator\")\n self.calibrator = calibrator_class(self, options.percentile)\n else:\n self.calibrator = calibrator_class(self)\n\n @property\n def qat_function(\n self,\n ) -> FakeQuantFunc:\n \"\"\"The autograd function providing forward and backward methods of this fake quantizer\n for the quantization-aware training\"\"\"\n return self.qat_backward_type.function\n\n @property\n def channel_size(self) -> Optional[int]:\n \"\"\"The channel size for the input tensor of this fake quantizer\"\"\"\n if not self.per_channel.item():\n return 1\n step_size = getattr(self, \"step_size\", None)\n zero_point = getattr(self, \"zero_point\", None)\n if not (\n isinstance(step_size, (torch.nn.Parameter, torch.Tensor))\n and isinstance(zero_point, (torch.nn.Parameter, torch.Tensor))\n ):\n return None\n if not (len(step_size.shape) == 1 and step_size.shape == zero_point.shape):\n log.error(\"step_size and zero_point have invalid shapes.\")\n log.debug(f\"self={self}\\n\" \"self.step_size={step_size}\\n\" \"self.zero_point={zero_point}\\n\")\n raise ValueError(\"step_size and zero_point have invalid shapes\")\n return int(step_size.shape[0])\n\n @channel_size.setter\n def channel_size(self, value: Optional[int]) -> None:\n \"\"\"Sets the channel size for the input tensor of this fake 
quantizer. Note that this property must be set at\n least (and exactly) once before calling this fake quantizer instance when `per_channel=True`\n \"\"\"\n if not self.per_channel.item():\n log.warning(\n \"Setting channel_size value will have no effect for per tensor weight quantization.\",\n stacklevel=2,\n )\n return\n existing_channel_size = self.channel_size\n if existing_channel_size is not None:\n log.error(f\"channel_size value was already set to {existing_channel_size}. It cannot be reset.\")\n raise RuntimeError(\"channel_size cannot be reset.\")\n if value is None:\n return\n self.step_size = torch.nn.Parameter(torch.ones(value))\n self.zero_point = torch.nn.Parameter(\n torch.zeros(value),\n requires_grad=bool(not self.symmetric.item() and self.learn_zero_point.item()),\n )\n\n @property\n def quant_min(self) -> int:\n \"\"\"The minimum integer value this fake quantizer can handle\"\"\"\n if self.narrow:\n return int(-(1 << (int(self.precision.item()) - 1)) + 1)\n return 0 if self.unsigned.item() else int(-(1 << (int(self.precision.item()) - 1)))\n\n @property\n def quant_max(self) -> int:\n \"\"\"The maximum integer value this fake quantizer can handle\"\"\"\n if self.narrow:\n return (1 << int(self.precision.item())) - 1 + self.quant_min - 1\n return (1 << int(self.precision.item())) - 1 + self.quant_min\n\n @property\n def narrow(self) -> bool:\n \"\"\"Returns True in quantizer using narrow range and False otherwise.\"\"\"\n if torch.jit.is_tracing():\n return False\n return bool(self._narrow_range.item() and not self.unsigned.item() and self.symmetric.item())\n\n @property\n def is_enabled(self) -> bool:\n \"\"\"get quantizer mode\"\"\"\n return self._is_enabled\n\n def enable(self, mode: bool = True) -> None:\n \"\"\"Sets Quantizer in quantization enabling mode\n\n Args:\n mode (bool, optional): If `True`, enable quantization. Otherwise, disable quantization. Defaults to `True`.\n \"\"\"\n self._is_enabled = mode\n\n def disable(self) -> None:\n \"\"\"Sets quantizer in quantization disabling mode\"\"\"\n self._is_enabled = False\n\n def forward(self, inputs: torch.Tensor) -> torch.Tensor:\n \"\"\"The forward pass of fake quantizer\n\n Args:\n inputs (torch.Tensor): A tensor to fake-quantize.\n\n Raises\n ValueError: If fake quantizer has negative step_size or size of channel is invalid.\n\n Returns\n torch.Tensor: If fake quantizer is enabled, it returns a fake-quantized tensor.\n If fake quantizer is disable, it returns the value as entered.\n \"\"\"\n if not self._is_enabled:\n return inputs\n\n if (self.per_channel) and isinstance(inputs, torch.Tensor) and (self.channel_size != inputs.shape[0]):\n if self.channel_size is None:\n raise ValueError(\"channel_size(=None) must be set for per channel weight quantization\")\n raise ValueError(\n f\"channel_size(={self.channel_size}) value must be the same as \"\n f\"the first dimension of the input tensor (={inputs.shape[0]}).\"\n )\n\n if self.step_size.min() <= 0:\n log.error(\n f\"Expected step_size to be positive, but got step_size={self.step_size.data}. 
\"\n \"Please try one of the suggestions below:\\n\"\n \" * set the weight_decay of the fake quantizer's parameters to 0;\\n\"\n \" * reduce the learning rate for the fake quantizer's parameters; or\\n\"\n \" * reduce the grad_scale of the fake quantizer\"\n )\n raise ValueError(\"Step_size must be positive\")\n\n return self.qat_function(\n inputs,\n self.step_size,\n self.zero_point,\n self.grad_scale,\n self.quant_min,\n self.quant_max,\n self.per_channel,\n not self.is_zero_point_folded,\n )\n\n def invert_signedness(self) -> None:\n \"\"\"Inverts signedness of this fake quantizer\"\"\"\n self.unsigned.data = torch.logical_not(self.unsigned.data)\n\n # pylint: disable=protected-access\n def extra_repr(self) -> str:\n if self.precision.item() == 32:\n return f\"precision: {self.precision.item()}\"\n string = f\"{self.qat_backward_type}(precision: {self.precision.item()}\"\n string += \", per_tensor\" if not self.per_channel.item() else \", per_channel\"\n string += f\", quant_min: {self.quant_min}, quant_max: {self.quant_max}\"\n if not self.symmetric.item():\n string += \", asymmetric\"\n string += (\n f\", zero_point: {self.zero_point.item()}, is_zero_point_folded: {self.is_zero_point_folded}\"\n if not self.per_channel.item()\n else \"\"\n )\n string += f\", is_enabled: {self.is_enabled}\"\n string += f\", calib: {self.calibrator.__class__.__name__}\"\n string += \")\"\n return string\n\n @property\n def maxabs_bound(self) -> int:\n \"\"\"The maximum absolute limit value of the quantized domain.\n\n Returns:\n int: A Maximum absolute bound value.\n \"\"\"\n return max(abs(self.quant_min), abs(self.quant_max))\n\n @property\n def options(self) -> FakeQuantizerOptions:\n \"\"\"The options that current FakeQuantizer instance represents.\"\"\"\n percentile = getattr(self.calibrator, \"percentile\", None)\n zero_point = getattr(self, \"zero_point\", None)\n learn_zero_point = False if zero_point is None else zero_point.requires_grad\n\n return FakeQuantizerOptions(\n qat_backward=self.qat_backward_type,\n ptq_calibration=self.ptq_calibration,\n percentile=percentile,\n precision=int(self.precision.item()),\n symmetric=bool(self.symmetric.item()),\n unsigned=bool(self.unsigned.item()),\n per_channel=bool(self.per_channel.item()),\n learn_zero_point=learn_zero_point,\n grad_scale=self.grad_scale.item(),\n )\n\n def state_dict( # type: ignore[no-untyped-def, override]\n self, *args, **kwargs\n ) -> Union[OrderedDict[Any, Any], dict[str, Any]]:\n \"\"\"Stores the indices of ptq_calibration and qat_backward in addition to the torch state dict.\n\n Returns:\n dict:\n a dictionary containing a whole state of the module.\n \"\"\"\n state: OrderedDict = super().state_dict(*args, **kwargs)\n prefix = kwargs.get(\"prefix\")\n extra_state = {}\n # add qat_backward index\n extra_state[f\"{prefix}_qat_backward\"] = torch.tensor([self.qat_backward_type.value])\n # add ptq_calibration index\n extra_state[f\"{prefix}_ptq_calibration\"] = torch.tensor([self.ptq_calibration.value])\n if self.ptq_calibration == PTQCalibrationType.percentile:\n if not isinstance(self.calibrator, PercentileCalibrator):\n raise TypeError(\n \"calibrator must be instance of 'PercentileCalibrator' when ptq_calibrtion is 'percentile',\"\n f\"but got {self.calibrator}\"\n )\n extra_state[f\"{prefix}_ptq_calibration_percentile\"] = torch.tensor([self.calibrator.percentile])\n state.update(extra_state)\n return state\n\n def _load_from_state_dict(\n self,\n state_dict: dict,\n prefix: str,\n local_metadata: dict,\n strict: bool,\n 
missing_keys: list[str],\n unexpected_keys: list[str],\n error_msgs: list[str],\n ) -> None:\n self.qat_backward_type = QATBackwardType(state_dict.pop(f\"{prefix}_qat_backward\").item())\n self.ptq_calibration = PTQCalibrationType(state_dict.pop(f\"{prefix}_ptq_calibration\").item())\n calibrator_class = self.ptq_calibration.calibrator_class\n if self.ptq_calibration == PTQCalibrationType.percentile:\n self.calibrator = calibrator_class(self, state_dict.pop(f\"{prefix}_ptq_calibration_percentile\").item())\n else:\n self.calibrator = calibrator_class(self)\n return super()._load_from_state_dict(\n state_dict,\n prefix,\n local_metadata,\n strict,\n missing_keys,\n unexpected_keys,\n error_msgs,\n )" } ]
from typing import Optional from ...logger import log from ..fake_quantizer import FakeQuantizer import torch
3,972
""" Util classes using at quantized modules""" class UnaryNeuralQModuleMixin: """ Mixin-class for implementing weight-quantized counterparts of subclasses of torch.nn.Module with the parameters named 'weight' and 'bias' such that whose `forward` method takes exactly one parameter other than 'self'. Examples: `torch.nn.Conv1d`, `torch.nn.Conv2d`, `torch.nn.Conv3d`, `torch.nn.Linear` """ weight: torch.nn.Parameter bias: Optional[torch.nn.Parameter]
""" Util classes using at quantized modules""" class UnaryNeuralQModuleMixin: """ Mixin-class for implementing weight-quantized counterparts of subclasses of torch.nn.Module with the parameters named 'weight' and 'bias' such that whose `forward` method takes exactly one parameter other than 'self'. Examples: `torch.nn.Conv1d`, `torch.nn.Conv2d`, `torch.nn.Conv3d`, `torch.nn.Linear` """ weight: torch.nn.Parameter bias: Optional[torch.nn.Parameter]
input_quantizer: Optional[FakeQuantizer]
1
2023-12-08 06:41:50+00:00
8k
ximinng/PyTorch-SVGRender
pytorch_svgrender/painter/wordasimage/painter_params.py
[ { "identifier": "DiffVGState", "path": "pytorch_svgrender/diffvg_warp/diffvg_state.py", "snippet": "class DiffVGState(torch.nn.Module):\n\n def __init__(self,\n device: torch.device,\n use_gpu: bool = torch.cuda.is_available(),\n print_timing: bool = False,\n canvas_width: int = None,\n canvas_height: int = None):\n super(DiffVGState, self).__init__()\n # pydiffvg device setting\n self.device = device\n init_pydiffvg(device, use_gpu, print_timing)\n\n # canvas size\n self.canvas_width = canvas_width\n self.canvas_height = canvas_height\n\n # record all paths\n self.shapes = []\n self.shape_groups = []\n # record the current optimized path\n self.cur_shapes = []\n self.cur_shape_groups = []\n\n # learnable SVG params\n self.point_vars = []\n self.color_vars = []\n self.width_vars = []\n\n def clip_curve_shape(self, *args, **kwargs):\n raise NotImplementedError\n\n def render_warp(self, seed=0):\n self.clip_curve_shape()\n\n scene_args = pydiffvg.RenderFunction.serialize_scene(\n self.canvas_width, self.canvas_height, self.shapes, self.shape_groups\n )\n _render = pydiffvg.RenderFunction.apply\n img = _render(self.canvas_width, # width\n self.canvas_height, # height\n 2, # num_samples_x\n 2, # num_samples_y\n seed, # seed\n None,\n *scene_args)\n return img\n\n @staticmethod\n def load_svg(path_svg):\n canvas_width, canvas_height, shapes, shape_groups = pydiffvg.svg_to_scene(path_svg)\n return canvas_width, canvas_height, shapes, shape_groups\n\n def save_svg(self,\n filename: Union[AnyStr, pathlib.Path],\n width: int = None,\n height: int = None,\n shapes: List = None,\n shape_groups: List = None,\n use_gamma: bool = False,\n background: str = None):\n \"\"\"\n Save an SVG file with specified parameters and shapes.\n Noting: New version of SVG saving function that is an adaptation of pydiffvg.save_svg.\n The original version saved words resulting in incomplete glyphs.\n\n Args:\n filename (str): The path to save the SVG file.\n width (int): The width of the SVG canvas.\n height (int): The height of the SVG canvas.\n shapes (list): A list of shapes to be included in the SVG.\n shape_groups (list): A list of shape groups.\n use_gamma (bool): Flag indicating whether to apply gamma correction.\n background (str, optional): The background color of the SVG.\n\n Returns:\n None\n \"\"\"\n root = etree.Element('svg')\n root.set('version', '1.1')\n root.set('xmlns', 'http://www.w3.org/2000/svg')\n root.set('width', str(width))\n root.set('height', str(height))\n\n if background is not None:\n print(f\"setting background to {background}\")\n root.set('style', str(background))\n\n defs = etree.SubElement(root, 'defs')\n g = etree.SubElement(root, 'g')\n\n if use_gamma:\n f = etree.SubElement(defs, 'filter')\n f.set('id', 'gamma')\n f.set('x', '0')\n f.set('y', '0')\n f.set('width', '100%')\n f.set('height', '100%')\n gamma = etree.SubElement(f, 'feComponentTransfer')\n gamma.set('color-interpolation-filters', 'sRGB')\n feFuncR = etree.SubElement(gamma, 'feFuncR')\n feFuncR.set('type', 'gamma')\n feFuncR.set('amplitude', str(1))\n feFuncR.set('exponent', str(1 / 2.2))\n feFuncG = etree.SubElement(gamma, 'feFuncG')\n feFuncG.set('type', 'gamma')\n feFuncG.set('amplitude', str(1))\n feFuncG.set('exponent', str(1 / 2.2))\n feFuncB = etree.SubElement(gamma, 'feFuncB')\n feFuncB.set('type', 'gamma')\n feFuncB.set('amplitude', str(1))\n feFuncB.set('exponent', str(1 / 2.2))\n feFuncA = etree.SubElement(gamma, 'feFuncA')\n feFuncA.set('type', 'gamma')\n feFuncA.set('amplitude', str(1))\n 
feFuncA.set('exponent', str(1 / 2.2))\n g.set('style', 'filter:url(#gamma)')\n\n # Store color\n for i, shape_group in enumerate(shape_groups):\n def add_color(shape_color, name):\n if isinstance(shape_color, pydiffvg.LinearGradient):\n lg = shape_color\n color = etree.SubElement(defs, 'linearGradient')\n color.set('id', name)\n color.set('x1', str(lg.begin[0].item()))\n color.set('y1', str(lg.begin[1].item()))\n color.set('x2', str(lg.end[0].item()))\n color.set('y2', str(lg.end[1].item()))\n offsets = lg.offsets.data.cpu().numpy()\n stop_colors = lg.stop_colors.data.cpu().numpy()\n for j in range(offsets.shape[0]):\n stop = etree.SubElement(color, 'stop')\n stop.set('offset', str(offsets[j]))\n c = lg.stop_colors[j, :]\n stop.set('stop-color', 'rgb({}, {}, {})'.format(\n int(255 * c[0]), int(255 * c[1]), int(255 * c[2])\n ))\n stop.set('stop-opacity', '{}'.format(c[3]))\n if isinstance(shape_color, pydiffvg.RadialGradient):\n lg = shape_color\n color = etree.SubElement(defs, 'radialGradient')\n color.set('id', name)\n color.set('cx', str(lg.center[0].item() / width))\n color.set('cy', str(lg.center[1].item() / height))\n # this only support width=height\n color.set('r', str(lg.radius[0].item() / width))\n offsets = lg.offsets.data.cpu().numpy()\n stop_colors = lg.stop_colors.data.cpu().numpy()\n for j in range(offsets.shape[0]):\n stop = etree.SubElement(color, 'stop')\n stop.set('offset', str(offsets[j]))\n c = lg.stop_colors[j, :]\n stop.set('stop-color', 'rgb({}, {}, {})'.format(\n int(255 * c[0]), int(255 * c[1]), int(255 * c[2])\n ))\n stop.set('stop-opacity', '{}'.format(c[3]))\n\n if shape_group.fill_color is not None:\n add_color(shape_group.fill_color, 'shape_{}_fill'.format(i))\n if shape_group.stroke_color is not None:\n add_color(shape_group.stroke_color, 'shape_{}_stroke'.format(i))\n\n for i, shape_group in enumerate(shape_groups):\n shape = shapes[shape_group.shape_ids[0]]\n if isinstance(shape, pydiffvg.Circle):\n shape_node = etree.SubElement(g, 'circle')\n shape_node.set('r', str(shape.radius.item()))\n shape_node.set('cx', str(shape.center[0].item()))\n shape_node.set('cy', str(shape.center[1].item()))\n elif isinstance(shape, pydiffvg.Polygon):\n shape_node = etree.SubElement(g, 'polygon')\n points = shape.points.data.cpu().numpy()\n path_str = ''\n for j in range(0, shape.points.shape[0]):\n path_str += '{} {}'.format(points[j, 0], points[j, 1])\n if j != shape.points.shape[0] - 1:\n path_str += ' '\n shape_node.set('points', path_str)\n elif isinstance(shape, pydiffvg.Path):\n for j, id in enumerate(shape_group.shape_ids):\n shape = shapes[id]\n if isinstance(shape, pydiffvg.Path):\n if j == 0:\n shape_node = etree.SubElement(g, 'path')\n node_id = shape_node.get('id')\n path_str = ''\n\n num_segments = shape.num_control_points.shape[0]\n num_control_points = shape.num_control_points.data.cpu().numpy()\n points = shape.points.data.cpu().numpy()\n num_points = shape.points.shape[0]\n path_str += 'M {} {}'.format(points[0, 0], points[0, 1])\n point_id = 1\n for j in range(0, num_segments):\n if num_control_points[j] == 0:\n p = point_id % num_points\n path_str += ' L {} {}'.format(\n points[p, 0], points[p, 1])\n point_id += 1\n elif num_control_points[j] == 1:\n p1 = (point_id + 1) % num_points\n path_str += ' Q {} {} {} {}'.format(\n points[point_id, 0], points[point_id, 1],\n points[p1, 0], points[p1, 1])\n point_id += 2\n elif num_control_points[j] == 2:\n p2 = (point_id + 2) % num_points\n path_str += ' C {} {} {} {} {} {}'.format(\n points[point_id, 0], 
points[point_id, 1],\n points[point_id + 1, 0], points[point_id + 1, 1],\n points[p2, 0], points[p2, 1])\n point_id += 3\n if node_id is not None:\n shape_node.set('id', node_id) # add id to Path\n shape_node.set('d', path_str)\n elif isinstance(shape, pydiffvg.Rect):\n shape_node = etree.SubElement(g, 'rect')\n shape_node.set('x', str(shape.p_min[0].item()))\n shape_node.set('y', str(shape.p_min[1].item()))\n shape_node.set('width', str(shape.p_max[0].item() - shape.p_min[0].item()))\n shape_node.set('height', str(shape.p_max[1].item() - shape.p_min[1].item()))\n elif isinstance(shape, pydiffvg.Ellipse):\n shape_node = etree.SubElement(g, 'ellipse')\n shape_node.set('cx', str(shape.center[0].item()))\n shape_node.set('cy', str(shape.center[1].item()))\n shape_node.set('rx', str(shape.radius[0].item()))\n shape_node.set('ry', str(shape.radius[1].item()))\n else:\n raise NotImplementedError(f'shape type: {type(shape)} is not involved in pydiffvg.')\n\n shape_node.set('stroke-width', str(2 * shape.stroke_width.data.cpu().item()))\n if shape_group.fill_color is not None:\n if isinstance(shape_group.fill_color, pydiffvg.LinearGradient):\n shape_node.set('fill', 'url(#shape_{}_fill)'.format(i))\n else:\n c = shape_group.fill_color.data.cpu().numpy()\n shape_node.set('fill', 'rgb({}, {}, {})'.format(\n int(255 * c[0]), int(255 * c[1]), int(255 * c[2])))\n shape_node.set('opacity', str(c[3]))\n else:\n shape_node.set('fill', 'none')\n if shape_group.stroke_color is not None:\n if isinstance(shape_group.stroke_color, pydiffvg.LinearGradient):\n shape_node.set('stroke', 'url(#shape_{}_stroke)'.format(i))\n else:\n c = shape_group.stroke_color.data.cpu().numpy()\n shape_node.set('stroke', 'rgb({}, {}, {})'.format(\n int(255 * c[0]), int(255 * c[1]), int(255 * c[2])))\n shape_node.set('stroke-opacity', str(c[3]))\n shape_node.set('stroke-linecap', 'round')\n shape_node.set('stroke-linejoin', 'round')\n\n with open(filename, \"w\") as f:\n f.write(pydiffvg.prettify(root))\n\n @staticmethod\n def save_image(img, filename, gamma=1):\n if torch.is_tensor(img) and torch.device != 'cpu':\n img = img.detach().cpu()\n pydiffvg.imwrite(img, filename, gamma=gamma)" }, { "identifier": "font_string_to_beziers", "path": "pytorch_svgrender/painter/wordasimage/ttf.py", "snippet": "def font_string_to_beziers(font, txt, size=30, spacing=1.0, merge=True, target_control=None):\r\n \"\"\"\r\n Load a font and convert the outlines for a given string to cubic bezier curves,\r\n if merge is True, simply return a list of all bezier curves,\r\n otherwise return a list of lists with the bezier curves for each glyph\r\n \"\"\"\r\n\r\n face = ft.Face(font)\r\n face.set_char_size(64 * size)\r\n slot = face.glyph\r\n\r\n x = 0\r\n beziers = []\r\n previous = 0\r\n for c in txt:\r\n face.load_char(c, ft.FT_LOAD_DEFAULT | ft.FT_LOAD_NO_BITMAP)\r\n bez = glyph_to_cubics(face, x)\r\n\r\n # Check number of control points if desired\r\n if target_control is not None:\r\n if c in target_control.keys():\r\n nctrl = np.sum([len(C) for C in bez])\r\n while nctrl < target_control[c]:\r\n longest = np.max(\r\n sum([[bezier.approx_arc_length(b) for b in bezier.chain_to_beziers(C)] for C in bez], []))\r\n thresh = longest * 0.5\r\n bez = [bezier.subdivide_bezier_chain(C, thresh) for C in bez]\r\n nctrl = np.sum([len(C) for C in bez])\r\n print(\"nctrl: \", nctrl)\r\n\r\n if merge:\r\n beziers += bez\r\n else:\r\n beziers.append(bez)\r\n\r\n kerning = face.get_kerning(previous, c)\r\n x += (slot.advance.x + kerning.x) * spacing\r\n previous = 
c\r\n\r\n return beziers\r" }, { "identifier": "write_letter_svg", "path": "pytorch_svgrender/painter/wordasimage/ttf.py", "snippet": "def write_letter_svg(c, header, fontname, beziers, subdivision_thresh, dest_path):\r\n cmds = ''\r\n svg = header\r\n\r\n path = '<g><path d=\"'\r\n for C in beziers:\r\n if subdivision_thresh is not None:\r\n print('subd')\r\n C = bezier.subdivide_bezier_chain(C, subdivision_thresh)\r\n cmds += bezier_chain_to_commands(C, True)\r\n path += cmds + '\"/>\\n'\r\n svg += path + '</g></svg>\\n'\r\n\r\n fname = f\"{dest_path}/{fontname}_{c}.svg\"\r\n fname = fname.replace(\" \", \"_\")\r\n with open(fname, 'w') as f:\r\n f.write(svg)\r\n return fname, path\r" } ]
import os import pathlib import numpy as np import pydiffvg import torch from torch.optim.lr_scheduler import LambdaLR from pytorch_svgrender.diffvg_warp import DiffVGState from .ttf import font_string_to_beziers, write_letter_svg
4,758
class Painter(DiffVGState): def __init__(self, font: str, canvas_size: int, device: torch.device): super(Painter, self).__init__(device=device, use_gpu=True, canvas_width=canvas_size, canvas_height=canvas_size) self.font = font def init_shape(self, path_svg, seed=0): assert pathlib.Path(path_svg).exists(), f"{path_svg} is not exist!" print(f"-> init svg from `{path_svg}` ...") # 1. load svg from path canvas_width, canvas_height, self.shapes, self.shape_groups = self.load_svg(path_svg) # 2. set learnable parameters self.set_point_parameters() img = self.render_warp(seed) img = img[:, :, 3:4] * img[:, :, :3] + \ torch.ones(img.shape[0], img.shape[1], 3, device=self.device) * (1 - img[:, :, 3:4]) img = img[:, :, :3] img = img.unsqueeze(0) # convert img from HWC to NCHW img = img.permute(0, 3, 1, 2).to(self.device) # NHWC -> NCHW return img def get_image(self, step: int = 0): img = self.render_warp(step) img = img[:, :, 3:4] * img[:, :, :3] + \ torch.ones(img.shape[0], img.shape[1], 3, device=self.device) * (1 - img[:, :, 3:4]) img = img[:, :, :3] img = img.unsqueeze(0) # convert img from HWC to NCHW img = img.permute(0, 3, 1, 2).to(self.device) # NHWC -> NCHW return img def clip_curve_shape(self): for group in self.shape_groups: group.fill_color.data.clamp_(0.0, 1.0) def set_point_parameters(self): # stroke`s location optimization self.point_vars = [] for i, path in enumerate(self.shapes): path.points.requires_grad = True self.point_vars.append(path.points) def get_point_parameters(self): return self.point_vars def preprocess_font(self, word, letter, level_of_cc=1, font_path=None, init_path=None): if level_of_cc == 0: target_cp = None else: target_cp = {"A": 120, "B": 120, "C": 100, "D": 100, "E": 120, "F": 120, "G": 120, "H": 120, "I": 35, "J": 80, "K": 100, "L": 80, "M": 100, "N": 100, "O": 100, "P": 120, "Q": 120, "R": 130, "S": 110, "T": 90, "U": 100, "V": 100, "W": 100, "X": 130, "Y": 120, "Z": 120, "a": 120, "b": 120, "c": 100, "d": 100, "e": 120, "f": 120, "g": 120, "h": 120, "i": 35, "j": 80, "k": 100, "l": 80, "m": 100, "n": 100, "o": 100, "p": 120, "q": 120, "r": 130, "s": 110, "t": 90, "u": 100, "v": 100, "w": 100, "x": 130, "y": 120, "z": 120} target_cp = {k: v * level_of_cc for k, v in target_cp.items()} print("init_path: ", init_path) subdivision_thresh = None self.font_string_to_svgs(init_path, font_path, word, target_control=target_cp, subdivision_thresh=subdivision_thresh) self.normalize_letter_size(init_path, font_path, word) # optimize two adjacent letters print("letter: ", letter) if len(letter) > 1: subdivision_thresh = None self.font_string_to_svgs(init_path, font_path, letter, target_control=target_cp, subdivision_thresh=subdivision_thresh) self.normalize_letter_size(init_path, font_path, letter) print("preprocess_font done.") def font_string_to_svgs(self, dest_path, font, txt, size=30, spacing=1.0, target_control=None, subdivision_thresh=None): fontname = self.font
class Painter(DiffVGState): def __init__(self, font: str, canvas_size: int, device: torch.device): super(Painter, self).__init__(device=device, use_gpu=True, canvas_width=canvas_size, canvas_height=canvas_size) self.font = font def init_shape(self, path_svg, seed=0): assert pathlib.Path(path_svg).exists(), f"{path_svg} is not exist!" print(f"-> init svg from `{path_svg}` ...") # 1. load svg from path canvas_width, canvas_height, self.shapes, self.shape_groups = self.load_svg(path_svg) # 2. set learnable parameters self.set_point_parameters() img = self.render_warp(seed) img = img[:, :, 3:4] * img[:, :, :3] + \ torch.ones(img.shape[0], img.shape[1], 3, device=self.device) * (1 - img[:, :, 3:4]) img = img[:, :, :3] img = img.unsqueeze(0) # convert img from HWC to NCHW img = img.permute(0, 3, 1, 2).to(self.device) # NHWC -> NCHW return img def get_image(self, step: int = 0): img = self.render_warp(step) img = img[:, :, 3:4] * img[:, :, :3] + \ torch.ones(img.shape[0], img.shape[1], 3, device=self.device) * (1 - img[:, :, 3:4]) img = img[:, :, :3] img = img.unsqueeze(0) # convert img from HWC to NCHW img = img.permute(0, 3, 1, 2).to(self.device) # NHWC -> NCHW return img def clip_curve_shape(self): for group in self.shape_groups: group.fill_color.data.clamp_(0.0, 1.0) def set_point_parameters(self): # stroke`s location optimization self.point_vars = [] for i, path in enumerate(self.shapes): path.points.requires_grad = True self.point_vars.append(path.points) def get_point_parameters(self): return self.point_vars def preprocess_font(self, word, letter, level_of_cc=1, font_path=None, init_path=None): if level_of_cc == 0: target_cp = None else: target_cp = {"A": 120, "B": 120, "C": 100, "D": 100, "E": 120, "F": 120, "G": 120, "H": 120, "I": 35, "J": 80, "K": 100, "L": 80, "M": 100, "N": 100, "O": 100, "P": 120, "Q": 120, "R": 130, "S": 110, "T": 90, "U": 100, "V": 100, "W": 100, "X": 130, "Y": 120, "Z": 120, "a": 120, "b": 120, "c": 100, "d": 100, "e": 120, "f": 120, "g": 120, "h": 120, "i": 35, "j": 80, "k": 100, "l": 80, "m": 100, "n": 100, "o": 100, "p": 120, "q": 120, "r": 130, "s": 110, "t": 90, "u": 100, "v": 100, "w": 100, "x": 130, "y": 120, "z": 120} target_cp = {k: v * level_of_cc for k, v in target_cp.items()} print("init_path: ", init_path) subdivision_thresh = None self.font_string_to_svgs(init_path, font_path, word, target_control=target_cp, subdivision_thresh=subdivision_thresh) self.normalize_letter_size(init_path, font_path, word) # optimize two adjacent letters print("letter: ", letter) if len(letter) > 1: subdivision_thresh = None self.font_string_to_svgs(init_path, font_path, letter, target_control=target_cp, subdivision_thresh=subdivision_thresh) self.normalize_letter_size(init_path, font_path, letter) print("preprocess_font done.") def font_string_to_svgs(self, dest_path, font, txt, size=30, spacing=1.0, target_control=None, subdivision_thresh=None): fontname = self.font
glyph_beziers = font_string_to_beziers(font, txt, size, spacing, merge=False, target_control=target_control)
1
2023-12-13 08:18:01+00:00
8k
lyhisme/DeST
libs/helper.py
[ { "identifier": "get_id2class_map", "path": "libs/class_id_map.py", "snippet": "def get_id2class_map(dataset: str, dataset_dir: str = \"./dataset\") -> Dict[int, str]:\n class2id_map = get_class2id_map(dataset, dataset_dir)\n\n return {val: key for key, val in class2id_map.items()}" }, { "identifier": "AverageMeter", "path": "libs/metric.py", "snippet": "class AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self, name: str, fmt: str = \":f\") -> None:\n self.name = name\n self.fmt = fmt\n self.reset()\n\n def reset(self) -> None:\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val: float, n: int = 1) -> None:\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n def __str__(self) -> str:\n fmtstr = \"{name} {val\" + self.fmt + \"} ({avg\" + self.fmt + \"})\"\n return fmtstr.format(**self.__dict__)" }, { "identifier": "BoundaryScoreMeter", "path": "libs/metric.py", "snippet": "class BoundaryScoreMeter(object):\n def __init__(self, tolerance=5, boundary_threshold=0.7):\n # max distance of the frame which can be regarded as correct\n self.tolerance = tolerance\n\n # threshold of the boundary value which can be regarded as action boundary\n self.boundary_threshold = boundary_threshold\n self.tp = 0.0 # true positive\n self.fp = 0.0 # false positive\n self.fn = 0.0 # false negative\n self.n_correct = 0.0\n self.n_frames = 0.0\n\n def update(self, preds, gts, masks):\n \"\"\"\n Args:\n preds: np.array. the model output(N, T)\n gts: np.array. boudnary ground truth array (N, T)\n masks: np.array. np.bool. valid length for each video (N, T)\n Return:\n Accuracy\n Boundary F1 Score\n \"\"\"\n\n for pred, gt, mask in zip(preds, gts, masks):\n # ignore invalid frames\n pred = pred[mask]\n gt = gt[mask]\n\n pred_idx = argrelmax(pred, threshold=self.boundary_threshold)\n gt_idx = argrelmax(gt, threshold=self.boundary_threshold)\n\n n_frames = pred.shape[0]\n tp = 0.0\n fp = 0.0\n fn = 0.0\n\n hits = np.zeros(len(gt_idx))\n\n # calculate true positive, false negative, false postive, true negative\n for i in range(len(pred_idx)):\n dist = np.abs(np.array(gt_idx) - pred_idx[i])\n min_dist = np.min(dist)\n idx = np.argmin(dist)\n\n if min_dist <= self.tolerance and hits[idx] == 0:\n tp += 1\n hits[idx] = 1\n else:\n fp += 1\n\n fn = len(gt_idx) - sum(hits)\n tn = n_frames - tp - fp - fn\n\n self.tp += tp\n self.fp += fp\n self.fn += fn\n self.n_frames += n_frames\n self.n_correct += tp + tn\n\n def get_scores(self):\n \"\"\"\n Return:\n Accuracy\n Boundary F1 Score\n \"\"\"\n\n # accuracy\n acc = 100 * self.n_correct / self.n_frames\n\n # Boudnary F1 Score\n precision = self.tp / float(self.tp + self.fp)\n recall = self.tp / float(self.tp + self.fn)\n\n f1s = 2.0 * (precision * recall) / (precision + recall + 1e-7)\n f1s = np.nan_to_num(f1s) * 100\n\n # Accuracy, Edit Distance, F1 Score\n return acc, precision * 100, recall * 100, f1s\n\n def save_scores(self, save_path: str) -> None:\n acc, precision, recall, f1s = self.get_scores()\n\n # save log\n columns = [\"bound_acc\", \"precision\", \"recall\", \"bound_f1s\"]\n data_dict = {\n \"bound_acc\": [acc],\n \"precision\": [precision],\n \"recall\": [recall],\n \"bound_f1s\": [f1s],\n }\n\n df = pd.DataFrame(data_dict, columns=columns)\n df.to_csv(save_path, index=False)\n\n def reset(self):\n self.tp = 0.0 # true positive\n self.fp = 0.0 # false positive\n self.fn = 0.0 # false negative\n self.n_correct = 0.0\n self.n_frames 
= 0.0" }, { "identifier": "ScoreMeter", "path": "libs/metric.py", "snippet": "class ScoreMeter(object):\n def __init__(\n self,\n id2class_map: Dict[int, str],\n iou_thresholds: Tuple[float] = (0.1, 0.25, 0.5),\n ignore_index: int = 255,\n ) -> None:\n\n self.iou_thresholds = iou_thresholds # threshold for f score\n self.ignore_index = ignore_index\n self.id2class_map = id2class_map\n self.edit_score = 0\n self.tp = [0 for _ in range(len(iou_thresholds))] # true positive\n self.fp = [0 for _ in range(len(iou_thresholds))] # false positive\n self.fn = [0 for _ in range(len(iou_thresholds))] # false negative\n self.n_correct = 0\n self.n_frames = 0\n self.n_videos = 0\n self.n_classes = len(self.id2class_map)\n self.confusion_matrix = np.zeros((self.n_classes, self.n_classes))\n\n def _fast_hist(self, pred: np.ndarray, gt: np.ndarray) -> np.ndarray:\n mask = (gt >= 0) & (gt < self.n_classes)\n hist = np.bincount(\n self.n_classes * gt[mask].astype(int) + pred[mask],\n minlength=self.n_classes ** 2,\n ).reshape(self.n_classes, self.n_classes)\n return hist\n\n def update(\n self,\n outputs: np.ndarray,\n gts: np.ndarray,\n boundaries: Optional[np.ndarray] = None,\n masks: Optional[np.ndarray] = None,\n ) -> None:\n \"\"\"\n Args:\n outputs: np.array. shape(N, C, T)\n the model output for boundary prediciton\n gt: np.array. shape(N, T)\n Ground Truth for boundary\n \"\"\"\n if len(outputs.shape) == 3:\n preds = outputs.argmax(axis=1)\n elif len(outputs.shape) == 2:\n preds = copy.copy(outputs)\n\n for pred, gt in zip(preds, gts):\n pred = pred[gt != self.ignore_index]\n gt = gt[gt != self.ignore_index]\n\n for lt, lp in zip(pred, gt):\n self.confusion_matrix += self._fast_hist(lt.flatten(), lp.flatten())\n\n self.n_videos += 1\n # count the correct frame\n self.n_frames += len(pred)\n for i in range(len(pred)):\n if pred[i] == gt[i]:\n self.n_correct += 1\n\n # calculate the edit distance\n p_label, p_start, p_end = get_segments(pred, self.id2class_map)\n g_label, g_start, g_end = get_segments(gt, self.id2class_map)\n\n self.edit_score += levenshtein(p_label, g_label, norm=True)\n\n for i, th in enumerate(self.iou_thresholds):\n tp, fp, fn = get_n_samples(\n p_label, p_start, p_end, g_label, g_start, g_end, th\n )\n self.tp[i] += tp\n self.fp[i] += fp\n self.fn[i] += fn\n\n def get_scores(self) -> Tuple[float, float, float]:\n \"\"\"\n Return:\n Accuracy\n Normlized Edit Distance\n F1 Score of Each Threshold\n \"\"\"\n\n # accuracy\n acc = 100 * float(self.n_correct) / self.n_frames\n\n # edit distance\n edit_score = float(self.edit_score) / self.n_videos\n\n # F1 Score\n f1s = []\n for i in range(len(self.iou_thresholds)):\n precision = self.tp[i] / float(self.tp[i] + self.fp[i])\n recall = self.tp[i] / float(self.tp[i] + self.fn[i])\n\n f1 = 2.0 * (precision * recall) / (precision + recall + 1e-7)\n f1 = np.nan_to_num(f1) * 100\n\n f1s.append(f1)\n\n # Accuracy, Edit Distance, F1 Score\n return acc, edit_score, f1s\n\n def return_confusion_matrix(self) -> np.ndarray:\n return self.confusion_matrix\n\n def save_scores(self, save_path: str) -> None:\n acc, edit_score, segment_f1s = self.get_scores()\n\n # save log\n columns = [\"cls_acc\", \"edit\"]\n data_dict = {\n \"cls_acc\": [acc],\n \"edit\": [edit_score],\n }\n\n for i in range(len(self.iou_thresholds)):\n key = \"segment f1s@{}\".format(self.iou_thresholds[i])\n columns.append(key)\n data_dict[key] = [segment_f1s[i]]\n\n df = pd.DataFrame(data_dict, columns=columns)\n df.to_csv(save_path, index=False)\n\n def 
save_confusion_matrix(self, save_path: str) -> None:\n with open(save_path, \"w\") as file:\n writer = csv.writer(file, lineterminator=\"\\n\")\n writer.writerows(self.confusion_matrix)\n\n def reset(self) -> None:\n self.edit_score = 0\n self.tp = [0 for _ in range(len(self.iou_thresholds))] # true positive\n self.fp = [0 for _ in range(len(self.iou_thresholds))] # false positive\n self.fn = [0 for _ in range(len(self.iou_thresholds))] # false negative\n self.n_correct = 0\n self.n_frames = 0\n self.n_videos = 0\n self.confusion_matrix = np.zeros((self.n_classes, self.n_classes))" }, { "identifier": "PostProcessor", "path": "libs/postprocess.py", "snippet": "class PostProcessor(object):\n def __init__(\n self,\n name: str,\n boundary_th: int = 0.7,\n theta_t: int = 15,\n kernel_size: int = 15,\n ) -> None:\n self.func = {\n \"refinement_with_boundary\": self._refinement_with_boundary,\n \"relabeling\": self._relabeling,\n \"smoothing\": self._smoothing,\n }\n assert name in self.func\n\n self.name = name\n self.boundary_th = boundary_th\n self.theta_t = theta_t\n self.kernel_size = kernel_size\n\n if name == \"smoothing\":\n self.filter = GaussianSmoothing(self.kernel_size)\n\n def _is_probability(self, x: np.ndarray) -> bool:\n assert x.ndim == 3\n\n if x.shape[1] == 1:\n # sigmoid\n if x.min() >= 0 and x.max() <= 1:\n return True\n else:\n return False\n else:\n # softmax\n _sum = np.sum(x, axis=1).astype(np.float32)\n _ones = np.ones_like(_sum, dtype=np.float32)\n return np.allclose(_sum, _ones)\n\n def _convert2probability(self, x: np.ndarray) -> np.ndarray:\n \"\"\"\n Args: x (N, C, T)\n \"\"\"\n assert x.ndim == 3\n\n if self._is_probability(x):\n return x\n else:\n if x.shape[1] == 1:\n # sigmoid\n prob = 1 / (1 + np.exp(-x))\n else:\n # softmax\n prob = np.exp(x) / np.sum(np.exp(x), axis=1)\n return prob.astype(np.float32)\n\n def _convert2label(self, x: np.ndarray) -> np.ndarray:\n assert x.ndim == 2 or x.ndim == 3\n\n if x.ndim == 2:\n return x.astype(np.int64)\n else:\n if not self._is_probability(x):\n x = self._convert2probability(x)\n\n label = np.argmax(x, axis=1)\n return label.astype(np.int64)\n\n def _refinement_with_boundary(\n self,\n outputs: np.array,\n boundaries: np.ndarray,\n masks: np.ndarray,\n ) -> np.ndarray:\n \"\"\"\n Get segments which is defined as the span b/w two boundaries,\n and decide their classes by majority vote.\n Args:\n outputs: numpy array. shape (N, C, T)\n the model output for frame-level class prediction.\n boundaries: numpy array. shape (N, 1, T)\n boundary prediction.\n masks: np.array. np.bool. shape (N, 1, T)\n valid length for each video\n Return:\n preds: np.array. 
shape (N, T)\n final class prediction considering boundaries.\n \"\"\"\n\n preds = self._convert2label(outputs)\n boundaries = self._convert2probability(boundaries)\n\n for i, (output, pred, boundary, mask) in enumerate(\n zip(outputs, preds, boundaries, masks)\n ):\n boundary = boundary[mask]\n idx = argrelmax(boundary, threshold=self.boundary_th)\n\n # add the index of the last action ending\n T = pred.shape[0]\n idx.append(T)\n\n # majority vote\n for j in range(len(idx) - 1):\n count = np.bincount(pred[idx[j] : idx[j + 1]])\n modes = np.where(count == count.max())[0]\n if len(modes) == 1:\n mode = modes\n else:\n if outputs.ndim == 3:\n # if more than one majority class exist\n prob_sum_max = 0\n for m in modes:\n prob_sum = output[m, idx[j] : idx[j + 1]].sum()\n if prob_sum_max < prob_sum:\n mode = m\n prob_sum_max = prob_sum\n else:\n # decide first mode when more than one majority class\n # have the same number during oracle experiment\n mode = modes[0]\n\n preds[i, idx[j] : idx[j + 1]] = mode\n\n return preds\n\n def _relabeling(self, outputs: np.ndarray, **kwargs: np.ndarray) -> np.ndarray:\n \"\"\"\n Relabeling small action segments with their previous action segment\n Args:\n output: the results of action segmentation. (N, T) or (N, C, T)\n theta_t: the threshold of the size of action segments.\n Return:\n relabeled output. (N, T)\n \"\"\"\n\n preds = self._convert2label(outputs)\n\n for i in range(preds.shape[0]):\n # shape (T,)\n last = preds[i][0]\n cnt = 1\n for j in range(1, preds.shape[1]):\n if last == preds[i][j]:\n cnt += 1\n else:\n if cnt > self.theta_t:\n cnt = 1\n last = preds[i][j]\n else:\n preds[i][j - cnt : j] = preds[i][j - cnt - 1]\n cnt = 1\n last = preds[i][j]\n\n if cnt <= self.theta_t:\n preds[i][j - cnt : j] = preds[i][j - cnt - 1]\n\n return preds\n\n def _smoothing(self, outputs: np.ndarray, **kwargs: np.ndarray) -> np.ndarray:\n \"\"\"\n Smoothing action probabilities with gaussian filter.\n Args:\n outputs: frame-wise action probabilities. (N, C, T)\n Return:\n predictions: final prediction. (N, T)\n \"\"\"\n\n outputs = self._convert2probability(outputs)\n outputs = self.filter(torch.Tensor(outputs)).numpy()\n\n preds = self._convert2label(outputs)\n return preds\n\n def __call__(self, outputs, **kwargs: np.ndarray) -> np.ndarray:\n preds = self.func[self.name](outputs, **kwargs)\n return preds" } ]
import os import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F from typing import Optional, Tuple from torch.utils.data import DataLoader from libs.class_id_map import get_id2class_map from libs.metric import AverageMeter, BoundaryScoreMeter, ScoreMeter from libs.postprocess import PostProcessor
4,692
def train( train_loader: DataLoader, model: nn.Module, criterion_cls: nn.Module, criterion_bound: nn.Module, lambda_bound_loss: float, optimizer: optim.Optimizer, device: str, ) -> float: losses = AverageMeter("Loss", ":.4e") # switch training mode model.train() for sample in train_loader: x = sample["feature"] t = sample["label"] b = sample["boundary"] mask = sample["mask"] x = x.to(device) t = t.to(device) b = b.to(device) mask = mask.to(device) batch_size = x.shape[0] # compute output and loss output_cls, output_bound = model(x, mask) loss = 0.0 if isinstance(output_cls, list): n = len(output_cls) for out in output_cls: loss += criterion_cls(out, t, x) / n else: loss += criterion_cls(output_cls, t, x) if isinstance(output_bound, list): n = len(output_bound) for out in output_bound: loss += lambda_bound_loss * criterion_bound(out, b, mask) / n else: loss += lambda_bound_loss * criterion_bound(output_bound, b, mask) # record loss losses.update(loss.item(), batch_size) optimizer.zero_grad() loss.backward() optimizer.step() return losses.avg def validate( val_loader: DataLoader, model: nn.Module, criterion_cls: nn.Module, criterion_bound: nn.Module, lambda_bound_loss: float, device: str, dataset: str, dataset_dir: str, iou_thresholds: Tuple[float], boundary_th: float, tolerance: int, refinement_method: Optional[str] = None ) -> Tuple[float, float, float, float, float, float, float, float, str]: losses = AverageMeter("Loss", ":.4e") postprocessor = PostProcessor(refinement_method, boundary_th) scores_cls = ScoreMeter( id2class_map=get_id2class_map(dataset, dataset_dir=dataset_dir), iou_thresholds=iou_thresholds, )
def train( train_loader: DataLoader, model: nn.Module, criterion_cls: nn.Module, criterion_bound: nn.Module, lambda_bound_loss: float, optimizer: optim.Optimizer, device: str, ) -> float: losses = AverageMeter("Loss", ":.4e") # switch training mode model.train() for sample in train_loader: x = sample["feature"] t = sample["label"] b = sample["boundary"] mask = sample["mask"] x = x.to(device) t = t.to(device) b = b.to(device) mask = mask.to(device) batch_size = x.shape[0] # compute output and loss output_cls, output_bound = model(x, mask) loss = 0.0 if isinstance(output_cls, list): n = len(output_cls) for out in output_cls: loss += criterion_cls(out, t, x) / n else: loss += criterion_cls(output_cls, t, x) if isinstance(output_bound, list): n = len(output_bound) for out in output_bound: loss += lambda_bound_loss * criterion_bound(out, b, mask) / n else: loss += lambda_bound_loss * criterion_bound(output_bound, b, mask) # record loss losses.update(loss.item(), batch_size) optimizer.zero_grad() loss.backward() optimizer.step() return losses.avg def validate( val_loader: DataLoader, model: nn.Module, criterion_cls: nn.Module, criterion_bound: nn.Module, lambda_bound_loss: float, device: str, dataset: str, dataset_dir: str, iou_thresholds: Tuple[float], boundary_th: float, tolerance: int, refinement_method: Optional[str] = None ) -> Tuple[float, float, float, float, float, float, float, float, str]: losses = AverageMeter("Loss", ":.4e") postprocessor = PostProcessor(refinement_method, boundary_th) scores_cls = ScoreMeter( id2class_map=get_id2class_map(dataset, dataset_dir=dataset_dir), iou_thresholds=iou_thresholds, )
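The train() loop above averages the classification loss over every refinement stage returned by the model and adds a boundary loss scaled by lambda_bound_loss. The self-contained sketch below reproduces that pattern; cross-entropy and BCE-with-logits are stand-ins for the unshown criterion_cls and criterion_bound, and the names, shapes, and weight are illustrative only.

```python
import torch
import torch.nn.functional as F

def combined_loss(stage_logits, targets, stage_boundaries, boundary_targets,
                  lambda_bound=0.1):
    # Average the classification loss over all refinement stages ...
    cls_loss = sum(F.cross_entropy(logits, targets) for logits in stage_logits)
    cls_loss = cls_loss / len(stage_logits)
    # ... then add the weighted boundary term, also averaged over stages.
    bound_loss = sum(F.binary_cross_entropy_with_logits(b, boundary_targets)
                     for b in stage_boundaries) / len(stage_boundaries)
    return cls_loss + lambda_bound * bound_loss

# Toy shapes: 3 stages, batch of 2, 5 classes, 16 frames.
stages = [torch.randn(2, 5, 16) for _ in range(3)]
targets = torch.randint(0, 5, (2, 16))
bounds = [torch.randn(2, 1, 16) for _ in range(3)]
bound_targets = torch.rand(2, 1, 16)
print(combined_loss(stages, targets, bounds, bound_targets).item())
```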
next_line: scores_bound = BoundaryScoreMeter(
gold_snippet_index: 2
created_at: 2023-12-12 02:27:15+00:00
level: 8k
repo_name: bolna-ai/bolna
file_path: bolna/agent_manager/task_manager.py
[ { "identifier": "BaseManager", "path": "bolna/agent_manager/base_manager.py", "snippet": "class BaseManager:\n def __init__(self):\n self.agent = \"bolna-agent\"" }, { "identifier": "create_ws_data_packet", "path": "bolna/helpers/utils.py", "snippet": "def create_ws_data_packet(data, meta_info=None, is_md5_hash=False, llm_generated=False):\n metadata = copy.deepcopy(meta_info)\n if meta_info is not None: #It'll be none in case we connect through dashboard playground\n metadata[\"is_md5_hash\"] = is_md5_hash\n metadata[\"llm_generated\"] = llm_generated\n return {\n 'data': data,\n 'meta_info': metadata\n }" }, { "identifier": "is_valid_md5", "path": "bolna/helpers/utils.py", "snippet": "def is_valid_md5(hash_string):\n return bool(re.fullmatch(r\"[0-9a-f]{32}\", hash_string))" }, { "identifier": "get_raw_audio_bytes_from_base64", "path": "bolna/helpers/utils.py", "snippet": "async def get_raw_audio_bytes_from_base64(agent_name, b64_string, audio_format='mp3', user_id = None, assistant_id=None, local = False):\n # we are already storing pcm formatted audio in the filler config. No need to encode/decode them further\n audio_data = None\n if local:\n file_name = f\"{PREPROCESS_DIR}/{agent_name}/{audio_format}/{b64_string}.{audio_format}\"\n with open(file_name, 'rb') as file:\n # Read the entire file content into a variable\n audio_data = file.read()\n else:\n object_key = f\"{user_id}/{assistant_id}/audio/{b64_string}.{audio_format}\"\n audio_data = await get_s3_file(BUCKET_NAME, object_key)\n\n return audio_data" }, { "identifier": "get_required_input_types", "path": "bolna/helpers/utils.py", "snippet": "def get_required_input_types(task):\n input_types = dict()\n for i, chain in enumerate(task['toolchain']['pipelines']):\n first_model = chain[0]\n if chain[0] == \"transcriber\":\n input_types[\"audio\"] = i\n elif chain[0] == \"synthesizer\" or chain[0] == \"llm\":\n input_types[\"text\"] = i\n return input_types" }, { "identifier": "format_messages", "path": "bolna/helpers/utils.py", "snippet": "def format_messages(messages):\n formatted_string = \"\"\n for message in messages:\n role = message['role']\n content = message['content']\n\n if role == 'assistant':\n formatted_string += \"assistant: \" + content + \"\\n\"\n elif role == 'user':\n formatted_string += \"user: \" + content + \"\\n\"\n\n return formatted_string" }, { "identifier": "get_prompt_responses", "path": "bolna/helpers/utils.py", "snippet": "async def get_prompt_responses(agent_name, local=False, user_id=None, assistant_id = None):\n filepath = f\"{PREPROCESS_DIR}/{agent_name}/conversation_details.json\"\n data = \"\"\n if local:\n logger.info(\"Loading up the conversation details from the local file\")\n try:\n with open(filepath, \"r\") as json_file:\n data = json.load(json_file)\n except Exception as e:\n logger.error(\"Could not load up the dataset\")\n else:\n key = f\"{user_id}/{assistant_id}/conversation_details.json\"\n logger.info(f\"Loading up the conversation details from the s3 file BUCKET_NAME {BUCKET_NAME} {key}\")\n try:\n response = await get_s3_file(BUCKET_NAME, key)\n file_content = response.decode('utf-8')\n json_content = json.loads(file_content)\n return json_content\n\n except Exception as e:\n traceback.print_exc()\n print(f\"An error occurred: {e}\")\n return None\n\n return data" }, { "identifier": "update_prompt_with_context", "path": "bolna/helpers/utils.py", "snippet": "def update_prompt_with_context(prompt, context_data):\n if not isinstance(context_data.get('recipient_data'), dict):\n return 
prompt\n return prompt.format(**context_data.get('recipient_data', {}))" }, { "identifier": "get_md5_hash", "path": "bolna/helpers/utils.py", "snippet": "def get_md5_hash(text):\n return hashlib.md5(text.encode()).hexdigest()" }, { "identifier": "clean_json_string", "path": "bolna/helpers/utils.py", "snippet": "def clean_json_string(json_str):\n if json_str.startswith(\"```json\") and json_str.endswith(\"```\"):\n json_str = json_str[7:-3].strip()\n return json_str" }, { "identifier": "yield_chunks_from_memory", "path": "bolna/helpers/utils.py", "snippet": "def yield_chunks_from_memory(audio_bytes, chunk_size=512):\n total_length = len(file_in_memory)\n for i in range(0, total_length, chunk_size):\n yield file_in_memory[i:i + chunk_size]" }, { "identifier": "configure_logger", "path": "bolna/helpers/logger_config.py", "snippet": "def configure_logger(file_name, enabled=True, logging_level='INFO'):\n if logging_level not in VALID_LOGGING_LEVELS:\n logging_level = \"INFO\"\n\n logging.basicConfig(\n level=logging_level,\n format=\"%(asctime)s.%(msecs)03d %(levelname)s {%(module)s} [%(funcName)s] %(message)s\",\n datefmt=\"%Y-%m-%d %H:%M:%S\",\n )\n\n logger = logging.getLogger(file_name)\n\n if not enabled:\n logger.disabled = True\n return logger" } ]
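Among the helpers listed in the context above, get_md5_hash is later used by the task manager as the cache key for LLM responses. A minimal sketch of that lookup pattern follows; the plain dict stands in for whatever cache backend the manager is actually handed, and all names other than get_md5_hash are mine.

```python
import hashlib

def get_md5_hash(text: str) -> str:
    # Same helper as in the context above.
    return hashlib.md5(text.encode()).hexdigest()

cache = {}  # stand-in for the real cache object

def cached_llm_response(user_text, generate):
    key = get_md5_hash(user_text)
    if key in cache:
        return cache[key]           # cache hit: skip the model call
    response = generate(user_text)  # cache miss: call the model
    cache[key] = response
    return response

print(cached_llm_response("hello", lambda t: t.upper()))  # miss -> "HELLO"
print(cached_llm_response("hello", lambda t: t.upper()))  # hit  -> "HELLO"
```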
import_statement:
import asyncio
import traceback
import time
import json
from .base_manager import BaseManager
from bolna.agent_types import *
from bolna.providers import *
from bolna.helpers.utils import create_ws_data_packet, is_valid_md5, get_raw_audio_bytes_from_base64, \
    get_required_input_types, format_messages, get_prompt_responses, update_prompt_with_context, get_md5_hash, clean_json_string, yield_chunks_from_memory
from bolna.helpers.logger_config import configure_logger
token_num: 4,273
await self.tools["output"].handle_interruption() self.sequence_ids = set() #Remove all the sequence ids so subsequent won't be processed if self.llm_task is not None: self.llm_task.cancel() self.llm_task = None self.was_long_pause = True # if len(self.synthesizer_tasks) > 0: # for synth_task in self.synthesizer_tasks: # synth_task.cancel() # self.synthesizer_tasks = [] ######################## # Transcriber task ######################## async def _handle_transcriber_output(self, next_task, transcriber_message, meta_info): if next_task == "llm": meta_info["origin"] = "transcriber" self.llm_task = asyncio.create_task( self._run_llm_task(create_ws_data_packet(transcriber_message, meta_info))) elif next_task == "synthesizer": self.synthesizer_tasks.append(asyncio.create_task( self._synthesize(create_ws_data_packet(transcriber_message, meta_info)))) else: logger.info(f"Need to separate out output task") async def _listen_transcriber(self): transcriber_message = "" start_time = None try: if self.stream: async for message in self.tools["transcriber"].transcribe(): if message['data'] == "transcriber_connection_closed": self.transcriber_duration += message['meta_info']["transcriber_duration"] logger.info("transcriber connection closed") return self._set_call_details(message) meta_info = message["meta_info"] sequence = await self.process_transcriber_request(meta_info) if message['data'] == "TRANSCRIBER_BEGIN": logger.info("starting transcriber stream") start_time = time.time() await self.tools["output"].handle_interruption() if self.llm_task is not None: logger.info("Cancelling LLM Task as it's on") self.llm_task.cancel() self.llm_task = None self.was_long_pause = True if len(self.synthesizer_tasks) > 0: logger.info("Cancelling Synthesizer tasks") for synth_task in self.synthesizer_tasks: synth_task.cancel() self.synthesizer_tasks = [] continue elif message['data'] == "TRANSCRIBER_END": logger.info("transcriber stream and preparing the next step") next_task = self._get_next_step(sequence, "transcriber") logger.info(f'got the next task {next_task}') if self.was_long_pause: logger.info( f"Seems like there was a long pause {self.history[-1]['content']} , {transcriber_message}") message = self.history[-1]['content'] + " " + transcriber_message self.history = self.history[:-1] self.was_long_pause = False logger.info(f'invoking next_task {next_task} with transcriber_message: {transcriber_message}') await self._handle_transcriber_output(next_task, transcriber_message, meta_info) transcriber_message = "" continue else: logger.info("processed text from transcriber: {}".format(message['data'])) transcriber_message += message['data'] else: logger.info("Not a streaming conversation. 
Hence getting a full blown transcript") async for message in self.tools["transcriber"].transcribe(): logger.info(f"message from transcriber {message}") sequence = message["meta_info"]["sequence"] next_task = self._get_next_step(sequence, "transcriber") self.transcriber_duration += message["meta_info"]["transcriber_duration"] if "transcriber_duration" in message["meta_info"] else 0 await self._handle_transcriber_output(next_task, message['data'], message["meta_info"]) except Exception as e: traceback.print_exc() logger.error(f"Error in transcriber {e}") async def __listen_synthesizer(self): try: if self.stream and self.synthesizer_provider != "polly": logger.info("Opening websocket connection to synthesizer") await self.tools["synthesizer"].open_connection() while True: logger.info("Listening to synthesizer") async for message in self.tools["synthesizer"].generate(): if not self.conversation_ended and message["meta_info"]["sequence_id"] in self.sequence_ids: await self.tools["output"].handle(message) if "end_of_synthesizer_stream" in message["meta_info"] and message["meta_info"]["end_of_synthesizer_stream"]: logger.info(f"Got End of stream and hence removing from sequence ids {self.sequence_ids} {message['meta_info']['sequence_id']}") self.sequence_ids.remove(message["meta_info"]["sequence_id"]) await asyncio.sleep(1) except Exception as e: logger.error(f"Error in synthesizer {e}") async def _synthesize(self, message): meta_info = message["meta_info"] text = message["data"] meta_info["type"] = "audio" try: if meta_info["is_md5_hash"]: logger.info('sending preprocessed audio response to {}'.format( self.task_config["tools_config"]["output"]["provider"]))
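The cropped_code above gates synthesizer output on a set of active sequence_ids, which process_interruption clears so that audio from an interrupted turn is silently dropped. A stripped-down, synchronous sketch of that guard (names and values are mine):

```python
# Audio chunks carry the sequence_id of the LLM turn that produced them;
# an interruption simply retires the old id so stale chunks are skipped.
active_sequence_ids = {7}

def handle_synth_chunk(chunk):
    if chunk["sequence_id"] in active_sequence_ids:
        return f"play {chunk['audio']}"
    return f"drop {chunk['audio']} (interrupted)"

print(handle_synth_chunk({"sequence_id": 7, "audio": "chunk-1"}))  # play chunk-1
active_sequence_ids.clear()  # user barged in -> old turn retired
print(handle_synth_chunk({"sequence_id": 7, "audio": "chunk-2"}))  # drop chunk-2 (interrupted)
```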
logger = configure_logger(__name__) class TaskManager(BaseManager): def __init__(self, assistant_name, task_id, task, ws, input_parameters=None, context_data=None, user_id=None, assistant_id=None, run_id=None, connected_through_dashboard=False, cache = None): super().__init__() logger.info(f"doing task {task}") self.task_id = task_id self.assistant_name = assistant_name self.tools = {} self.websocket = ws self.task_config = task self.context_data = context_data self.connected_through_dashboard = connected_through_dashboard # Set up communication queues between processes self.audio_queue = asyncio.Queue() self.llm_queue = asyncio.Queue() self.synthesizer_queue = asyncio.Queue() self.pipelines = task['toolchain']['pipelines'] self.textual_chat_agent = False if task['toolchain']['pipelines'][0] == "llm" and task["tools_config"]["llm_agent"][ "agent_task"] == "conversation": self.textual_chat_agent = False self.start_time = time.time() # Assistant persistance stuff self.user_id = user_id self.assistant_id = assistant_id self.run_id = run_id self.mark_set = set() self.conversation_ended = False # Prompts self.prompts, self.system_prompt = {}, {} self.input_parameters = input_parameters self.queues = { "transcriber": self.audio_queue, "llm": self.llm_queue, "synthesizer": self.synthesizer_queue } if task_id == 0: if self.task_config["tools_config"]["input"]["provider"] in SUPPORTED_INPUT_HANDLERS.keys(): logger.info(f"Connected through dashboard {connected_through_dashboard}") if connected_through_dashboard: # If connected through dashboard get basic dashboard class input_handler_class = SUPPORTED_INPUT_HANDLERS.get("default") else: input_handler_class = SUPPORTED_INPUT_HANDLERS.get( self.task_config["tools_config"]["input"]["provider"]) self.tools["input"] = input_handler_class(self.queues, self.websocket, get_required_input_types(task), self.mark_set, self.connected_through_dashboard) else: raise "Other input handlers not supported yet" if self.task_config["tools_config"]["output"] is None: logger.info("Not setting up any output handler as it is none") elif self.task_config["tools_config"]["output"]["provider"] in SUPPORTED_OUTPUT_HANDLERS.keys(): output_handler_class = SUPPORTED_OUTPUT_HANDLERS.get(self.task_config["tools_config"]["output"]["provider"]) if self.task_config["tools_config"]["output"]["provider"] == "twilio": logger.info(f"Making sure that the sampling rate for output handler is 8000") self.task_config['tools_config']['synthesizer']['provider_config']['sampling_rate'] = 8000 self.task_config['tools_config']['synthesizer']['audio_format'] = 'pcm' self.tools["output"] = output_handler_class(self.websocket, self.mark_set) else: raise "Other input handlers not supported yet" # Current conversation state self.current_request_id = None self.previous_request_id = None self.llm_rejected_request_ids = set() self.llm_processed_request_ids = set() # Agent stuff self.history = [] self.label_flow = [] # Setup IO SERVICE, TRANSCRIBER, LLM, SYNTHESIZER self.llm_task = None self.synthesizer_tasks = [] # state of conversation self.was_long_pause = False # Call conversations self.call_sid = None self.stream_sid = None # metering self.transcriber_duration = 0 self.synthesizer_characters = 0 self.ended_by_assistant = False self.extracted_data = None self.summarized_data = None #self.stream = not connected_through_dashboard and "synthesizer" in self.task_config["tools_config"] and self.task_config["tools_config"]["synthesizer"]["stream"] self.stream = not connected_through_dashboard #Currently we 
are allowing only realtime conversation based usecases. Hence it'll always be true unless connected through dashboard self.is_local = False # Memory self.cache = cache logger.info("task initialization completed") # Sequence id for interruption self.curr_sequence_id = 0 self.sequence_ids = set() async def load_prompt(self, assistant_name, task_id, is_local): logger.info("prompt and config setup started") self.is_local = is_local prompt_responses = await get_prompt_responses(assistant_name, assistant_id=self.assistant_id, user_id=self.user_id, local=self.is_local) self.prompts = prompt_responses["task_{}".format(task_id + 1)] if "system_prompt" in self.prompts: # This isn't a graph based agent enriched_prompt = self.prompts["system_prompt"] if self.context_data is not None: enriched_prompt = update_prompt_with_context(self.prompts["system_prompt"], self.context_data) self.system_prompt = { 'role': "system", 'content': enriched_prompt } else: self.system_prompt = { 'role': "system", 'content': "" } self.history = [self.system_prompt] llm_config = { "streaming_model": self.task_config["tools_config"]["llm_agent"]["streaming_model"], "classification_model": self.task_config["tools_config"]["llm_agent"]["classification_model"] } # setting transcriber if self.task_config["tools_config"]["transcriber"] is not None: provider = "playground" if self.connected_through_dashboard else self.task_config["tools_config"]["input"][ "provider"] self.task_config["tools_config"]["transcriber"]["input_queue"] = self.audio_queue if self.task_config["tools_config"]["transcriber"]["model"] in SUPPORTED_TRANSCRIBER_MODELS.keys(): if self.connected_through_dashboard: self.task_config["tools_config"]["transcriber"]["stream"] = False transcriber_class = SUPPORTED_TRANSCRIBER_MODELS.get( self.task_config["tools_config"]["transcriber"]["model"]) self.tools["transcriber"] = transcriber_class(provider, **self.task_config["tools_config"]["transcriber"]) # setting synthesizer logger.info(f"Synthesizer config: {self.task_config['tools_config']['synthesizer']}") if self.task_config["tools_config"]["synthesizer"] is not None: self.synthesizer_provider = self.task_config["tools_config"]["synthesizer"].pop("provider") synthesizer_class = SUPPORTED_SYNTHESIZER_MODELS.get(self.synthesizer_provider) provider_config = self.task_config["tools_config"]["synthesizer"].pop("provider_config") if self.connected_through_dashboard: self.task_config["tools_config"]["synthesizer"]["audio_format"] = "mp3" # Hard code mp3 if we're connected through dashboard self.task_config["tools_config"]["synthesizer"]["stream"] = False #Hardcode stream to be False as we don't want to get blocked by a __listen_synthesizer co-routine self.tools["synthesizer"] = synthesizer_class(**self.task_config["tools_config"]["synthesizer"], **provider_config) llm_config["max_tokens"] = self.task_config["tools_config"]["synthesizer"].get('max_tokens') llm_config["buffer_size"] = self.task_config["tools_config"]["synthesizer"].get('buffer_size') # setting llm if self.task_config["tools_config"]["llm_agent"]["family"] in SUPPORTED_LLM_MODELS.keys(): llm_class = SUPPORTED_LLM_MODELS.get(self.task_config["tools_config"]["llm_agent"]["family"]) llm = llm_class(**llm_config) else: raise Exception(f'LLM {self.task_config["tools_config"]["llm_agent"]["family"]} not supported') if self.task_config["task_type"] == "conversation": if self.task_config["tools_config"]["llm_agent"]["agent_flow_type"] == "streaming": self.tools["llm_agent"] = StreamingContextualAgent(llm) elif 
self.task_config["tools_config"]["llm_agent"]["agent_flow_type"] in ("preprocessed", "formulaic"): preprocessed = self.task_config["tools_config"]["llm_agent"]["agent_flow_type"] == "preprocessed" logger.info(f"LLM TYPE {type(llm)}") self.tools["llm_agent"] = GraphBasedConversationAgent(llm, context_data=self.context_data, prompts=self.prompts, preprocessed=preprocessed) elif self.task_config["task_type"] == "extraction": logger.info("Setting up extraction agent") self.tools["llm_agent"] = ExtractionContextualAgent(llm, prompt=self.system_prompt) self.extracted_data = None elif self.task_config["task_type"] == "summarization": logger.info("Setting up summarization agent") self.tools["llm_agent"] = SummarizationContextualAgent(llm, prompt=self.system_prompt) self.summarized_data = None logger.info("prompt and config setup completed") ######################## # LLM task ######################## async def _handle_llm_output(self, next_step, text_chunk, should_bypass_synth, meta_info): logger.info("received text from LLM for output processing: {}".format(text_chunk)) if next_step == "synthesizer" and not should_bypass_synth: task = asyncio.gather(self._synthesize(create_ws_data_packet(text_chunk, meta_info))) self.synthesizer_tasks.append(asyncio.ensure_future(task)) elif self.tools["output"] is not None: await self.tools["output"].handle(create_ws_data_packet(text_chunk, meta_info)) def _get_next_step(self, sequence, origin): try: return next((self.pipelines[sequence][i + 1] for i in range(len(self.pipelines[sequence]) - 1) if self.pipelines[sequence][i] == origin), "output") except Exception as e: logger.error(f"Error getting next step: {e}") def _set_call_details(self, message): if self.call_sid is not None and self.stream_sid is not None and "call_sid" not in message['meta_info'] and "stream_sid" not in message['meta_info']: return if "call_sid" in message['meta_info']: self.call_sid = message['meta_info']["call_sid"] if "stream_sid" in message: self.stream_sid = message['meta_info']["stream_sid"] async def _process_followup_task(self, message, sequence, meta_info): message = format_messages(self.input_parameters["messages"]) # Remove the initial system prompt self.history.append({ 'role': 'user', 'content': message }) json_data = await self.tools["llm_agent"].generate(self.history) if "summary" in json_data: logger.info(f'Summary {json_data["summary"]}') self.summarized_data = json_data["summary"] else: json_data = clean_json_string(json_data) logger.info(f"After replacing {json_data}") json_data = json.loads(json_data) self.extracted_data = json_data logger.info("Done") async def _process_conversation_preprocessed_task(self, message, sequence, meta_info): if self.task_config["tools_config"]["llm_agent"]['agent_flow_type'] == "preprocessed": llm_response = "" self.history.append({ 'role': 'user', 'content': message['data'] }) start_time = time.time() async for text_chunk in self.tools['llm_agent'].generate(self.history, stream=True, synthesize=True, label_flow=self.label_flow): if text_chunk == "<end_of_conversation>": logger.info("Got end of conversation. 
I'm stopping now") self.conversation_ended = True await asyncio.sleep(5) #Make sure that the message is passed over and complete before cutting the handler await self.tools["input"].stop_handler() logger.info("Stopped input handler") if "transcriber" in self.tools and not self.connected_through_dashboard: logger.info("Stopping transcriber") await self.tools["transcriber"].toggle_connection() await asyncio.sleep(5) # Making sure whatever message was passed is over return logger.info(f"Text chunk {text_chunk}") if is_valid_md5(text_chunk): self.synthesizer_tasks.append(asyncio.create_task( self._synthesize(create_ws_data_packet(text_chunk, meta_info, is_md5_hash=True)))) else: self.synthesizer_tasks.append(asyncio.create_task( self._synthesize(create_ws_data_packet(text_chunk, meta_info, is_md5_hash=False)))) async def _process_conversation_formulaic_task(self, message, sequence, meta_info): self.history.append({ 'role': 'user', 'content': message['data'] }) start_time = time.time() llm_response = "" logger.info("Agent flow is formulaic and hence moving smoothly") async for text_chunk in self.tools['llm_agent'].generate(self.history, stream=True, synthesize=True): if is_valid_md5(text_chunk): self.synthesizer_tasks.append(asyncio.create_task( self._synthesize(create_ws_data_packet(text_chunk, meta_info, is_md5_hash=True)))) else: # TODO Make it more modular llm_response += " " +text_chunk next_step = self._get_next_step(sequence, "llm") if next_step == "synthesizer": task = asyncio.gather(self._synthesize(create_ws_data_packet(text_chunk, meta_info))) self.synthesizer_tasks.append(asyncio.ensure_future(task)) else: logger.info(f"Sending output text {sequence}") await self.tools["output"].handle(create_ws_data_packet(text_chunk, meta_info)) self.synthesizer_tasks.append(asyncio.create_task( self._synthesize(create_ws_data_packet(text_chunk, meta_info, is_md5_hash=False)))) async def _process_conversation_task(self, message, sequence, meta_info): next_step = None logger.info("agent flow is not preprocessed") llm_response = "" self.history.append({ 'role': 'user', 'content': message['data'] }) start_time = time.time() should_bypass_synth = 'bypass_synth' in meta_info and meta_info['bypass_synth'] == True next_step = self._get_next_step(sequence, "llm") curr_sequence_id = self.curr_sequence_id + 1 meta_info["sequence_id"] = curr_sequence_id cache_response = self.cache.get(get_md5_hash(message['data'])) if self.cache is not None else None if cache_response is not None: logger.info("It was a cache hit and hence simply returning") await self._handle_llm_output(next_step, cache_response, should_bypass_synth, meta_info) else: async for llm_message in self.tools['llm_agent'].generate(self.history, synthesize=True): text_chunk, end_of_llm_stream = llm_message logger.info(f"###### time to get the first chunk {time.time() - start_time} {text_chunk}") llm_response += " " + text_chunk if end_of_llm_stream: meta_info["end_of_llm_stream"] = True if self.stream: await self._handle_llm_output(next_step, text_chunk, should_bypass_synth, meta_info) if not self.stream: meta_info["end_of_llm_stream"]= True await self._handle_llm_output(next_step, llm_response, should_bypass_synth, meta_info) #add to cache # if self.cache is not None: # self.cache.set(get_md5_hash(message['data']), llm_response) if self.current_request_id in self.llm_rejected_request_ids: logger.info("User spoke while LLM was generating response") else: self.history.append({"role": "assistant", "content": llm_response}) # TODO : Write a better 
check for completion prompt #answer = await self.tools["llm_agent"].check_for_completion(self.history) answer = False if answer: logger.info("Got end of conversation. I'm stopping now") self.conversation_ended = True self.ended_by_assistant = True await self.tools["input"].stop_handler() logger.info("Stopped input handler") if "transcriber" in self.tools and not self.connected_through_dashboard: logger.info("Stopping transcriber") await self.tools["transcriber"].toggle_connection() await asyncio.sleep(5) # Making sure whatever message was passed is over return self.llm_processed_request_ids.add(self.current_request_id) llm_response = "" def _extract_sequence_and_meta(self, message): sequence, meta_info = None, None if isinstance(message, dict) and "meta_info" in message: self._set_call_details(message) sequence = message["meta_info"]["sequence"] meta_info = message["meta_info"] return sequence, meta_info def _is_extraction_task(self): return self.task_config["task_type"] == "extraction" def _is_summarization_task(self): return self.task_config["task_type"] == "summarization" def _is_conversation_task(self): return self.task_config["task_type"] == "conversation" def _is_preprocessed_flow(self): return self.task_config["tools_config"]["llm_agent"]['agent_flow_type'] == "preprocessed" def _is_formulaic_flow(self): return self.task_config["tools_config"]["llm_agent"]['agent_flow_type'] == "formulaic" # This is used only in the case it's a text based chatbot async def _listen_llm_input_queue(self): logger.info( f"Starting listening to LLM queue as either Connected to dashboard = {self.connected_through_dashboard} or it's a textual chat agent {self.textual_chat_agent}") while True: try: ws_data_packet = await self.queues["llm"].get() logger.info(f"ws_data_packet {ws_data_packet}") bos_packet = create_ws_data_packet("<beginning_of_stream>", ws_data_packet['meta_info']) await self.tools["output"].handle(bos_packet) await self._run_llm_task( ws_data_packet) # In case s3 is down and it's an audio processing job, this might produce blank message on the frontend of playground. 
eos_packet = create_ws_data_packet("<end_of_stream>", ws_data_packet['meta_info']) await self.tools["output"].handle(eos_packet) except Exception as e: traceback.print_exc() logger.error(f"Something went wrong with LLM queue {e}") break async def _run_llm_task(self, message): logger.info("running llm based agent") sequence, meta_info = self._extract_sequence_and_meta(message) try: if self._is_extraction_task() or self._is_summarization_task(): await self._process_followup_task(message, sequence, meta_info) elif self._is_conversation_task(): if self._is_preprocessed_flow(): await self._process_conversation_preprocessed_task(message, sequence, meta_info) elif self._is_formulaic_flow(): await self._process_conversation_formulaic_task(message, sequence, meta_info) else: await self._process_conversation_task(message, sequence, meta_info) else: logger.error("unsupported task type: {}".format(self.task_config["task_type"])) self.llm_task = None except Exception as e: traceback.print_exc() logger.error(f"Something went wrong in llm: {e}") async def process_transcriber_request(self, meta_info): if not self.current_request_id or self.current_request_id != meta_info["request_id"]: self.previous_request_id, self.current_request_id = self.current_request_id, meta_info["request_id"] sequence = meta_info["sequence"] # check if previous request id is not in transmitted request id if self.previous_request_id is None: is_first_message = True elif self.previous_request_id not in self.llm_processed_request_ids: self.llm_rejected_request_ids.add(self.previous_request_id) else: skip_append_to_data = False return sequence async def process_interruption(self): await self.tools["output"].handle_interruption() self.sequence_ids = set() #Remove all the sequence ids so subsequent won't be processed if self.llm_task is not None: self.llm_task.cancel() self.llm_task = None self.was_long_pause = True # if len(self.synthesizer_tasks) > 0: # for synth_task in self.synthesizer_tasks: # synth_task.cancel() # self.synthesizer_tasks = [] ######################## # Transcriber task ######################## async def _handle_transcriber_output(self, next_task, transcriber_message, meta_info): if next_task == "llm": meta_info["origin"] = "transcriber" self.llm_task = asyncio.create_task( self._run_llm_task(create_ws_data_packet(transcriber_message, meta_info))) elif next_task == "synthesizer": self.synthesizer_tasks.append(asyncio.create_task( self._synthesize(create_ws_data_packet(transcriber_message, meta_info)))) else: logger.info(f"Need to separate out output task") async def _listen_transcriber(self): transcriber_message = "" start_time = None try: if self.stream: async for message in self.tools["transcriber"].transcribe(): if message['data'] == "transcriber_connection_closed": self.transcriber_duration += message['meta_info']["transcriber_duration"] logger.info("transcriber connection closed") return self._set_call_details(message) meta_info = message["meta_info"] sequence = await self.process_transcriber_request(meta_info) if message['data'] == "TRANSCRIBER_BEGIN": logger.info("starting transcriber stream") start_time = time.time() await self.tools["output"].handle_interruption() if self.llm_task is not None: logger.info("Cancelling LLM Task as it's on") self.llm_task.cancel() self.llm_task = None self.was_long_pause = True if len(self.synthesizer_tasks) > 0: logger.info("Cancelling Synthesizer tasks") for synth_task in self.synthesizer_tasks: synth_task.cancel() self.synthesizer_tasks = [] continue elif message['data'] 
== "TRANSCRIBER_END": logger.info("transcriber stream and preparing the next step") next_task = self._get_next_step(sequence, "transcriber") logger.info(f'got the next task {next_task}') if self.was_long_pause: logger.info( f"Seems like there was a long pause {self.history[-1]['content']} , {transcriber_message}") message = self.history[-1]['content'] + " " + transcriber_message self.history = self.history[:-1] self.was_long_pause = False logger.info(f'invoking next_task {next_task} with transcriber_message: {transcriber_message}') await self._handle_transcriber_output(next_task, transcriber_message, meta_info) transcriber_message = "" continue else: logger.info("processed text from transcriber: {}".format(message['data'])) transcriber_message += message['data'] else: logger.info("Not a streaming conversation. Hence getting a full blown transcript") async for message in self.tools["transcriber"].transcribe(): logger.info(f"message from transcriber {message}") sequence = message["meta_info"]["sequence"] next_task = self._get_next_step(sequence, "transcriber") self.transcriber_duration += message["meta_info"]["transcriber_duration"] if "transcriber_duration" in message["meta_info"] else 0 await self._handle_transcriber_output(next_task, message['data'], message["meta_info"]) except Exception as e: traceback.print_exc() logger.error(f"Error in transcriber {e}") async def __listen_synthesizer(self): try: if self.stream and self.synthesizer_provider != "polly": logger.info("Opening websocket connection to synthesizer") await self.tools["synthesizer"].open_connection() while True: logger.info("Listening to synthesizer") async for message in self.tools["synthesizer"].generate(): if not self.conversation_ended and message["meta_info"]["sequence_id"] in self.sequence_ids: await self.tools["output"].handle(message) if "end_of_synthesizer_stream" in message["meta_info"] and message["meta_info"]["end_of_synthesizer_stream"]: logger.info(f"Got End of stream and hence removing from sequence ids {self.sequence_ids} {message['meta_info']['sequence_id']}") self.sequence_ids.remove(message["meta_info"]["sequence_id"]) await asyncio.sleep(1) except Exception as e: logger.error(f"Error in synthesizer {e}") async def _synthesize(self, message): meta_info = message["meta_info"] text = message["data"] meta_info["type"] = "audio" try: if meta_info["is_md5_hash"]: logger.info('sending preprocessed audio response to {}'.format( self.task_config["tools_config"]["output"]["provider"]))
next_line: audio_chunk = await get_raw_audio_bytes_from_base64(self.assistant_name, text,
gold_snippet_index: 3
created_at: 2023-12-13 09:07:35+00:00
level: 8k
repo_name: relari-ai/continuous-eval
file_path: tests/retrieval_metrics_test.py
[ { "identifier": "LLMFactory", "path": "continuous_eval/llm_factory.py", "snippet": "class LLMFactory(LLMInterface):\n def __init__(self, model):\n super().__init__()\n self.model = model\n if model in [\"gpt-3.5-turbo-1106\", \"gpt-3.5-turbo-16k\", \"gpt-4-1106-preview\"]:\n self.client = OpenAI()\n elif model in [\"claude-2.1\", \"claude-2.0\", \"claude-instant-1.2\"]:\n assert ANTHROPIC_AVAILABLE, \"Anthropic is not available. Please install it.\"\n self.client = Anthropic()\n elif model in [\"gemini-pro\"]:\n assert GOOGLE_GENAI_AVAILABLE, \"Google GenAI is not available. Please install it.\"\n self.client = google_genai.GenerativeModel(model_name=model)\n else:\n raise ValueError(\n f\"Model {model} is not supported. \"\n \"Please choose one of the following models: \"\n \"gpt-3.5-turbo-1106, gpt-4-1106-preview, gemini-pro, claude-2.1, claude-2.0, claude-instant-1.2.\"\n )\n\n def _llm_response(self, prompt, temperature):\n \"\"\"\n Send a prompt to the LLM and return the response.\n \"\"\"\n if isinstance(self.client, OpenAI):\n # Leverage JSON mode in OpenAI API. Make sure the system prompt contains \"Output JSON\".\n if \"Output JSON\" in prompt[\"system_prompt\"]:\n response = self.client.chat.completions.create(\n model=self.model,\n response_format={\"type\": \"json_object\"},\n messages=[\n {\"role\": \"system\", \"content\": prompt[\"system_prompt\"]},\n {\"role\": \"user\", \"content\": prompt[\"user_prompt\"]},\n ],\n seed=0,\n temperature=temperature,\n max_tokens=1024,\n top_p=1,\n frequency_penalty=0,\n presence_penalty=0,\n )\n else:\n response = self.client.chat.completions.create(\n model=self.model,\n messages=[\n {\"role\": \"system\", \"content\": prompt[\"system_prompt\"]},\n {\"role\": \"user\", \"content\": prompt[\"user_prompt\"]},\n ],\n seed=0,\n temperature=temperature,\n max_tokens=1024,\n top_p=1,\n frequency_penalty=0,\n presence_penalty=0,\n )\n content = response.choices[0].message.content\n elif ANTHROPIC_AVAILABLE and isinstance(self.client, Anthropic):\n response = self.client.completions.create(\n model=\"claude-2.1\",\n max_tokens_to_sample=1024,\n temperature=temperature,\n prompt=f\"{prompt['system_prompt']}{HUMAN_PROMPT}{prompt['user_prompt']}{AI_PROMPT}\",\n )\n content = response.completion\n elif GOOGLE_GENAI_AVAILABLE and isinstance(self.client, google_genai.GenerativeModel):\n generation_config = {\n \"temperature\": temperature,\n \"top_p\": 1,\n \"top_k\": 1,\n \"max_output_tokens\": 1024,\n }\n safety_settings = [\n {\n \"category\": \"HARM_CATEGORY_HARASSMENT\",\n \"threshold\": \"BLOCK_ONLY_HIGH\",\n },\n {\n \"category\": \"HARM_CATEGORY_HATE_SPEECH\",\n \"threshold\": \"BLOCK_ONLY_HIGH\",\n },\n {\n \"category\": \"HARM_CATEGORY_SEXUALLY_EXPLICIT\",\n \"threshold\": \"BLOCK_ONLY_HIGH\",\n },\n {\n \"category\": \"HARM_CATEGORY_DANGEROUS_CONTENT\",\n \"threshold\": \"BLOCK_ONLY_HIGH\",\n },\n ]\n response = self.client.generate_content(\n f\"{prompt['system_prompt']}\\n{prompt['user_prompt']}\",\n generation_config=generation_config,\n safety_settings=safety_settings,\n )\n content = response.text\n else:\n raise ValueError(f\"Unknown model client\")\n\n return content\n\n def run(self, prompt, temperature=0):\n \"\"\"\n Run the LLM and return the response.\n Default temperature: 0\n \"\"\"\n content = self._llm_response(prompt=prompt, temperature=temperature)\n return content" }, { "identifier": "LLMBasedContextCoverage", "path": "continuous_eval/metrics/retrieval_LLM_based_metrics.py", "snippet": "class 
LLMBasedContextCoverage(LLMBasedMetric):\n def __init__(self, model: LLMInterface = DefaultLLM, use_few_shot: bool = True):\n super().__init__(model)\n self.use_few_shot = use_few_shot\n\n def __str__(self):\n return f\"LLMBasedContextCoverage(model={self.model}, use_few_shot={self.use_few_shot})\"\n\n def calculate(self, question, retrieved_contexts, answer, **kwargs):\n \"\"\"\n Calculate the context relevance score for the given datapoint.\n \"\"\"\n context = \"\\n\".join(retrieved_contexts)\n\n few_shot_prompt = (\n \"\"\"Example:\nquestion: What are the main characteristics of Jupiter?\ncontext: Jupiter is the fifth planet from the Sun and the largest in the Solar System. It is a gas giant with a mass more than two and a half times that of all the other planets in the Solar System combined, but less than one-thousandth the mass of the Sun. Jupiter is known for its prominent Great Red Spot, a giant storm larger than Earth that has been ongoing for hundreds of years.\nanswer: Jupiter is the largest planet in our Solar System and has a giant storm known as the Great Red Spot.\nclassification:\n[\n {{\n \"statement_1\":\"Jupiter is the largest planet in the Solar System.\",\n \"reason\": \"This is directly stated in the context.\",\n \"Attributed\": 1\n }},\n {{\n \"statement_2\":\"Jupiter is closer to the Sun than Earth.\",\n \"reason\": \"The context contradicts this, stating Jupiter is the fifth planet from the Sun, while Earth is the third.\",\n \"Attributed\": 0\n }}\n]\"\"\"\n if self.use_few_shot\n else \"\"\n )\n\n prompt = {\n \"system_prompt\": (\n \"\"\"\nGiven a question, context, and answer, analyze each statement in the answer and classify if the statement can be attributed to the given context or not. Output JSON strictly in the following format.\n\"\"\"\n + few_shot_prompt\n ),\n \"user_prompt\": (\"question: \" + question + \"\\ncontext: \" + context + \"\\nanswer: \" + answer),\n }\n\n content = self._llm.run(prompt)\n\n try:\n coverage = self.extract_attributed_from_broken_json(content)\n except Exception as e:\n print(f\"{type(e).__name__} Error: {content}, skipping\")\n return {\n \"LLM_based_context_coverage\": None,\n \"LLM_based_context_statements\": content,\n }\n\n return {\n \"LLM_based_context_coverage\": coverage,\n \"LLM_based_context_statements\": content,\n }\n\n @staticmethod\n def extract_attributed_from_broken_json(statements):\n pattern = r'\"Attributed\":\\s*(\\d+)'\n attributed_numbers = re.findall(pattern, statements, re.IGNORECASE)\n try:\n attributed_numbers = [int(num) for group in attributed_numbers for num in group if num]\n except Exception as e:\n print(f\"{type(e).__name__} Error: {attributed_numbers}, skipping\")\n return None\n coverage = sum(attributed_numbers) / len(attributed_numbers) if attributed_numbers else None\n return coverage" }, { "identifier": "LLMBasedContextPrecision", "path": "continuous_eval/metrics/retrieval_LLM_based_metrics.py", "snippet": "class LLMBasedContextPrecision(LLMBasedMetric):\n def __init__(\n self,\n model: LLMInterface = DefaultLLM,\n use_few_shot: bool = True,\n log_relevance_by_context: bool = False,\n ):\n super().__init__(model)\n self.use_few_shot = use_few_shot\n self.log_relevance_by_context = log_relevance_by_context\n\n def __str__(self):\n return f\"LLMBasedContextPrecision(model={self.model}, use_few_shot={self.use_few_shot})\"\n\n def calculate(self, question, retrieved_contexts, **kwargs):\n \"\"\"\n Calculate the context relevance score for the given datapoint.\n \"\"\"\n scores = []\n for 
context in retrieved_contexts:\n few_shot_prompt = (\n \"\"\"Example 1:\nQuestion: What is the capital of France?\nContext: Paris is the largest city and the capital of France. It has many historical monuments.\nResponse: Yes\nReasoning: The context states that Paris is the capital of France.\nExample 2:\nQuestion: What is the capital of France?\nContext: Lyon is a major city in France. It is known for its culinary arts.\nResponse: No\nReasoning: The context does not mention any city that is the capital of France.\nNow evaluate the following:\"\"\"\n if self.use_few_shot\n else \"\"\n )\n\n prompt = {\n \"system_prompt\": (\n \"\"\"\nGiven the following question and context, verify if the information in the given context is useful in answering the question. Respond with either Yes or No, followed by reasoning.\\n\n\"\"\"\n + few_shot_prompt\n ),\n \"user_prompt\": (\"Question: \" + question + \"\\nContext: \" + context + \"\\nResponse:\"),\n }\n\n content = self._llm.run(prompt)\n score = \"yes\" in content.lower()\n scores.append(score)\n\n relevant_chunks = 0\n average_precision = 0\n for i, score in enumerate(scores):\n if score:\n relevant_chunks += 1\n average_precision += relevant_chunks / (i + 1)\n average_precision = average_precision / relevant_chunks if relevant_chunks else 0\n precision = relevant_chunks / len(scores)\n\n if self.log_relevance_by_context:\n return {\n \"LLM_based_context_precision\": precision,\n \"LLM_based_context_average_precision\": average_precision,\n \"LLM_based_context_relevance_by_context\": scores,\n }\n else:\n return {\n \"LLM_based_context_precision\": precision,\n \"LLM_based_context_average_precision\": average_precision,\n }" }, { "identifier": "ExactSentenceMatch", "path": "continuous_eval/metrics/retrieval_matching_strategy.py", "snippet": "class ExactSentenceMatch(MatchingStrategy):\n @property\n def type(self):\n return MatchingStrategyType.SENTENCE_MATCH\n\n def is_relevant(self, retrieved_component, ground_truth_component):\n return retrieved_component == ground_truth_component" }, { "identifier": "RougeChunkMatch", "path": "continuous_eval/metrics/retrieval_matching_strategy.py", "snippet": "class RougeChunkMatch(MatchingStrategy):\n def __init__(self, threshold=_DEFAULT_ROUGE_CHUNK_MATCH_THRESHOLD) -> None:\n super().__init__()\n self.threshold = threshold\n\n @property\n def type(self):\n return MatchingStrategyType.CHUNK_MATCH\n\n def is_relevant(self, retrieved_component, ground_truth_component):\n return Rouge().get_scores(retrieved_component, ground_truth_component)[0][\"rouge-l\"][\"r\"] > self.threshold" }, { "identifier": "RougeSentenceMatch", "path": "continuous_eval/metrics/retrieval_matching_strategy.py", "snippet": "class RougeSentenceMatch(MatchingStrategy):\n def __init__(self, threshold=_DEFAULT_ROUGE_SENTENCE_MATCH_THRESHOLD) -> None:\n super().__init__()\n self.threshold = threshold\n\n @property\n def type(self):\n return MatchingStrategyType.SENTENCE_MATCH\n\n def is_relevant(self, retrieved_component, ground_truth_component):\n return Rouge().get_scores(retrieved_component, ground_truth_component)[0][\"rouge-l\"][\"r\"] > self.threshold" }, { "identifier": "PrecisionRecallF1", "path": "continuous_eval/metrics/retrieval_precision_recall_f1.py", "snippet": "class PrecisionRecallF1(Metric):\n def __init__(self, matching_strategy: MatchingStrategy = ExactChunkMatch()):\n super().__init__()\n assert isinstance(\n matching_strategy, MatchingStrategy\n ), \"Matching strategy must be an instance of MatchingStrategy.\"\n 
self.matching_strategy = matching_strategy\n\n def calculate(self, retrieved_contexts, ground_truth_contexts, **kwargs):\n # Calculate precision, recall and f1 based on different matching strategies.\n # These metrics do not consider the order or rank of relevant information in the retrieval.\n if self.matching_strategy.type == MatchingStrategyType.CHUNK_MATCH:\n ret_components = retrieved_contexts\n gt_components = ground_truth_contexts\n elif self.matching_strategy.type == MatchingStrategyType.SENTENCE_MATCH:\n ret_components = [sentence for chunk in retrieved_contexts for sentence in sent_tokenize(chunk)]\n gt_components = [sentence for chunk in ground_truth_contexts for sentence in sent_tokenize(chunk)]\n\n relevant_ret_components = 0\n hit_gt_components = set()\n for ret_component in ret_components:\n for gt_component in gt_components:\n if self.matching_strategy.is_relevant(ret_component, gt_component):\n relevant_ret_components += 1\n hit_gt_components.add(gt_component)\n continue\n precision = relevant_ret_components / len(ret_components) if ret_components else 0.0\n recall = len(hit_gt_components) / len(gt_components) if gt_components else 0.0\n\n try:\n f1 = 2 * (precision * recall) / (precision + recall)\n except ZeroDivisionError:\n f1 = 0.0\n return {\"context_precision\": precision, \"context_recall\": recall, \"context_f1\": f1}" }, { "identifier": "RankedRetrievalMetrics", "path": "continuous_eval/metrics/retrieval_ranked_metrics.py", "snippet": "class RankedRetrievalMetrics(Metric):\n def __init__(self, matching_strategy: MatchingStrategy = ExactChunkMatch()) -> None:\n super().__init__()\n self.matching_strategy = matching_strategy\n assert isinstance(\n matching_strategy, MatchingStrategy\n ), \"Matching strategy must be an instance of MatchingStrategy.\"\n assert (\n self.matching_strategy.type == MatchingStrategyType.CHUNK_MATCH\n ), \"Ranked metrics are calculated at chunk level.\"\n\n def calculate(self, retrieved_contexts, ground_truth_contexts, **kwargs):\n # Calculate ranked metrics (MAP, MRR, NDCG) based on different matching strategies.\n map = self.calculate_average_precision(retrieved_contexts, ground_truth_contexts)\n mrr = self.calculate_reciprocal_rank(retrieved_contexts, ground_truth_contexts)\n ndcg = self.calculate_normalized_discounted_cumulative_gain(retrieved_contexts, ground_truth_contexts)\n return {\"average_precision\": map, \"reciprocal_rank\": mrr, \"ndcg\": ndcg}\n\n def calculate_average_precision(self, retrieved_contexts, ground_truth_contexts, **kwargs):\n # Calculate average precision for a single query retrieval\n\n # Calculate average precision for each relevant chunk\n average_precision = 0\n relevant_chunks = 0\n\n for i, chunk in enumerate(retrieved_contexts):\n for ground_truth_chunk in ground_truth_contexts:\n if self.matching_strategy.is_relevant(chunk, ground_truth_chunk):\n relevant_chunks += 1\n average_precision += relevant_chunks / (i + 1)\n continue\n\n return average_precision / relevant_chunks if relevant_chunks else 0\n\n def calculate_reciprocal_rank(self, retrieved_contexts, ground_truth_contexts, **kwargs):\n # Calculate reciprocal rank for a single query retrieval\n\n # Calculate reciprocal rank for each relevant chunk\n for i, chunk in enumerate(retrieved_contexts):\n for ground_truth_chunk in ground_truth_contexts:\n if self.matching_strategy.is_relevant(chunk, ground_truth_chunk):\n return 1 / (i + 1)\n\n # If no relevant chunk is found, return 0\n return 0\n\n def 
calculate_normalized_discounted_cumulative_gain(self, retrieved_contexts, ground_truth_contexts, **kwargs):\n # Calculate normalized discounted cumulative gain for a single query retrieval\n\n # Calculate discounted cumulative gain\n dcg = 0\n for i, chunk in enumerate(retrieved_contexts):\n for ground_truth_chunk in ground_truth_contexts:\n if self.matching_strategy.is_relevant(chunk, ground_truth_chunk):\n # Calculate relevance score (relevant gain = 1)\n dcg += 1 / log(i + 2, 2)\n continue\n\n # Calculate ideal discounted cumulative gain\n idcg = 0\n for i in range(len(ground_truth_contexts)):\n idcg += 1 / log(i + 2, 2)\n\n return dcg / idcg" }, { "identifier": "example_datum", "path": "tests/helpers/example_datum.py", "snippet": "CAPITAL_OF_FRANCE = {\n \"question\": \"What is the capital of France?\",\n \"retrieved_contexts\": [\n \"Paris is the largest city in France.\",\n \"Lyon is a major city in France.\",\n ],\n \"ground_truth_contexts\": [\"Paris is the capital of France.\"],\n \"answer\": \"Paris\",\n \"ground_truths\": [\"Paris\"],\n}\nROMEO_AND_JULIET = {\n \"question\": \"Who wrote Romeo and Juliet?\",\n \"retrieved_contexts\": [\n \"Shakespeare was a playwright.\",\n \"Romeo and Juliet is a play by Shakespeare.\",\n ],\n \"ground_truth_contexts\": [\n \"Shakespeare was a playwright.\",\n \"Romeo and Juliet is a play by William Shakespeare.\",\n ],\n \"answer\": \"William Shakespeare\",\n \"ground_truths\": [\"William Shakespeare\"],\n}\nIMPLICATIONS_GLOBAL_WARMING = {\n \"question\": \"What are the implications of global warming?\",\n \"retrieved_contexts\": [\n (\n \"Global warming refers to the long-term rise in the average temperature of the Earth's climate system. \"\n \"It is a major aspect of climate change, and has been demonstrated by direct temperature measurements \"\n \"and by measurements of various effects of the warming. The terms are commonly used interchangeably, \"\n \"though global warming is more specifically about rising surface temperatures, while climate change includes \"\n \"global warming as well as everything else that increasing greenhouse gas amounts will affect. \"\n \"A 2016 report stated that the Arctic is warming at a rate double that of the global average. \"\n \"The effects of global warming include rising sea levels, regional changes in precipitation, more frequent \"\n \"extreme weather events such as heat waves, and expansion of deserts. Surface temperature increases are \"\n \"greatest in the Arctic, which has contributed to the retreat of glaciers, permafrost, and sea ice. \"\n \"Overall, higher temperatures bring more rain and snowfall, but for some regions, droughts and wildfires \"\n \"increase instead. Climate change threatens to diminish the supply of fresh water. A warming atmosphere \"\n \"can hold, and more frequently does hold, larger quantities of water vapor, which can lead to more intense \"\n \"rainstorms, causing destructive erosion. Warming also creates conditions that can lead to more powerful \"\n \"hurricanes. Rising temperatures also have the potential to change the nature of global rainfall, snow, \"\n \"and river flows. Effects significant to humans include the threat to food security from decreasing crop \"\n \"yields and the abandonment of populated areas due to rising sea levels. 
Because the climate system has \"\n \"a large inertia and greenhouse gases will remain in the atmosphere for a long time, climatic changes and \"\n \"their effects will continue for many centuries even if greenhouse gas emissions are stopped.\"\n ),\n (\n \"Environmental impacts of climate change might include harsher hurricanes and storms, the death of reefs \"\n \"and forests, more frequent and severe droughts, increased heat waves, and stronger, more intense wildfires. \"\n \"Such changes will have significant implications for human societies and the natural world. The extent of these \"\n \"effects will depend largely on the degree of future global warming and the strategies adopted for mitigation \"\n \"and adaptation. Some effects of climate change, such as record high temperatures and melting glaciers, are \"\n \"already being observed. The world community has taken some steps towards addressing climate change. The \"\n \"2015 Paris Agreement, for instance, set the goal of limiting global warming to well below 2.0 degrees Celsius \"\n \"relative to pre-industrial levels; and to limit the increase to 1.5 degrees Celsius, recognizing that this would \"\n \"substantially reduce the risks and impacts of climate change. This agreement is meant to signal the beginning \"\n \"of the end of over two centuries of predominance of fossil fuels. Some experts have called for a coordinated \"\n \"economic transition to rapid decarbonization, climate finance and 'climate justice'. The overall conclusion of \"\n \"the Intergovernmental Panel on Climate Change (IPCC), the peak scientific body on climate change, is that it \"\n \"is 'extremely likely' that the majority of global warming since 1950 has been caused by human activities.\"\n ),\n ],\n \"ground_truth_contexts\": [\n (\n \"Climate change threatens to diminish the supply of fresh water. A warming atmosphere \"\n \"can hold, and more frequently does hold, larger quantities of water vapor, which can lead to more intense \"\n \"rainstorms, causing destructive erosion. To mitigate these impacts, \"\n \"strategies such as reducing greenhouse gas emissions and enhancing sustainability practices are vital. \"\n \"The Paris Agreement of 2015 marks a global effort to limit warming and reduce the risks associated with \"\n \"climate change, aiming to transition away from fossil fuels towards cleaner, renewable sources of energy.\"\n )\n ],\n \"answer\": \"Reducing greenhouse gas emissions, transitioning to renewable energy\",\n \"ground_truths\": [\n \"Reducing greenhouse gas emissions\",\n \"Transitioning to renewable energy\",\n ],\n}\nFARGO = {\n \"question\": \"Did Fargo win the golden globe nominations for both seasons?\",\n \"retrieved_contexts\": [\n \"Fargo is an American black comedy crime drama television series created and primarily written by Noah Hawley. The show is inspired by the 1996 film of the same name, which was written and directed by the Coen brothers, and takes place within the same fictional universe. The Coens were impressed by Hawley's script and agreed to be named as executive producers.[3] The series premiered on April 15, 2014, on FX,[3] and follows an anthology format, with each season set in a different era and location, with a different story and mostly new characters and cast, although there is minor overlap. 
Each season is heavily influenced by various Coen brothers films, with each containing numerous references to them.[4]\",\n \"The first season, set primarily in Minnesota and North Dakota from January 2006 to February 2007 and starring Billy Bob Thornton, Allison Tolman, Colin Hanks, and Martin Freeman, received wide acclaim from critics.[5] It won the Primetime Emmy Awards for Outstanding Miniseries, Outstanding Directing, and Outstanding Casting, and received 15 additional nominations including Outstanding Writing, another Outstanding Directing nomination, and acting nominations for all four leads. It also won the Golden Globe Awards for Best Miniseries or Television Film and Best Actor – Miniseries or Television Film for Thornton.\",\n \"The second season, set in Minnesota, North Dakota, and South Dakota in March 1979 and starring Kirsten Dunst, Patrick Wilson, Jesse Plemons, Jean Smart, Allison Tolman, and Ted Danson, received widespread critical acclaim.[6] It received three Golden Globe nominations, along with several Emmy nominations including Outstanding Miniseries, and acting nominations for Dunst, Plemons, Smart, and Bokeem Woodbine.\",\n ],\n \"ground_truth_contexts\": [\n \"The first season, set primarily in Minnesota and North Dakota from January 2006 to February 2007 and starring Billy Bob Thornton, Allison Tolman, Colin Hanks, and Martin Freeman, received wide acclaim from critics.[5] It won the Primetime Emmy Awards for Outstanding Miniseries, Outstanding Directing, and Outstanding Casting, and received 15 additional nominations including Outstanding Writing, another Outstanding Directing nomination, and acting nominations for all four leads. It also won the Golden Globe Awards for Best Miniseries or Television Film and Best Actor – Miniseries or Television Film for Thornton.\",\n \"The second season, set in Minnesota, North Dakota, and South Dakota in March 1979 and starring Kirsten Dunst, Patrick Wilson, Jesse Plemons, Jean Smart, Allison Tolman, and Ted Danson, received widespread critical acclaim.[6] It received three Golden Globe nominations, along with several Emmy nominations including Outstanding Miniseries, and acting nominations for Dunst, Plemons, Smart, and Bokeem Woodbine.\",\n ],\n \"answer\": \"Berlin\",\n \"ground_truths\": [\n \"Yes, they did get a nomination in season 1 and 2.\",\n \"Not really, they didn't win for season three.\",\n ],\n}" }, { "identifier": "all_close", "path": "tests/helpers/utils.py", "snippet": "def all_close(\n datum_1: Dict[str, Union[Number, List[Number]]],\n datum_2: Dict[str, Union[Number, List[Number]]],\n rel_tol: float = 1e-8,\n abs_tol: float = 1e-4,\n):\n if set(datum_1.keys()) != set(datum_2.keys()):\n return False\n for key, value1 in datum_1.items():\n if isinstance(value1, list):\n if not all(math.isclose(v1, v2, rel_tol=rel_tol, abs_tol=abs_tol) for v1, v2 in zip(value1, datum_2[key])):\n return False\n else:\n if not math.isclose(value1, datum_2[key], rel_tol=rel_tol, abs_tol=abs_tol):\n return False\n return True" }, { "identifier": "in_zero_one", "path": "tests/helpers/utils.py", "snippet": "def in_zero_one(ret: Union[Number, Dict[str, Number]]):\n if isinstance(ret, Number):\n return ret >= 0 and ret <= 1\n else:\n return all(v >= 0 and v <= 1 for v in ret.values())" } ]
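The context above defines RankedRetrievalMetrics, whose calculate() reports average precision, reciprocal rank, and NDCG over the retrieved chunks judged by a matching strategy. The sketch below reproduces those three formulas on a plain boolean relevance list (i.e. the chunk-matching step is assumed to have already happened); the function names are mine.

```python
from math import log2

def average_precision(relevant):
    # `relevant` is a list of booleans in retrieval (rank) order.
    hits, ap = 0, 0.0
    for i, rel in enumerate(relevant):
        if rel:
            hits += 1
            ap += hits / (i + 1)
    return ap / hits if hits else 0.0

def reciprocal_rank(relevant):
    for i, rel in enumerate(relevant):
        if rel:
            return 1 / (i + 1)
    return 0.0

def ndcg(relevant, n_ground_truth):
    dcg = sum(1 / log2(i + 2) for i, rel in enumerate(relevant) if rel)
    idcg = sum(1 / log2(i + 2) for i in range(n_ground_truth))
    return dcg / idcg if idcg else 0.0

ranks = [False, True, True]                # second and third retrieved chunks match
print(round(average_precision(ranks), 3))  # 0.583
print(reciprocal_rank(ranks))              # 0.5
print(round(ndcg(ranks, 2), 3))            # 0.693
```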
import pytest from continuous_eval.llm_factory import LLMFactory from continuous_eval.metrics import ( ExactSentenceMatch, LLMBasedContextCoverage, LLMBasedContextPrecision, PrecisionRecallF1, RankedRetrievalMetrics, RougeChunkMatch, RougeSentenceMatch, ) from tests.helpers import example_datum from tests.helpers.utils import all_close, in_zero_one
6,792
def test_precision_recall_exact_chunk_match(): data = [example_datum.CAPITAL_OF_FRANCE, example_datum.ROMEO_AND_JULIET] expected_results = [ {"context_precision": 0.0, "context_recall": 0.0, "context_f1": 0.0}, {"context_precision": 1.0, "context_recall": 1.0, "context_f1": 1.0}, ] metric = PrecisionRecallF1(RougeChunkMatch(threshold=0.7)) assert all(all_close(metric.calculate(**datum), expected) for datum, expected in zip(data, expected_results)) def test_precision_recall_exact_sentence_match(): data = [example_datum.ROMEO_AND_JULIET] expected_results = [{"context_precision": 1.0, "context_recall": 1.0, "context_f1": 1.0}] metric = PrecisionRecallF1(RougeSentenceMatch(threshold=0.8)) assert all(all_close(metric.calculate(**datum), expected) for datum, expected in zip(data, expected_results)) def test_precision_recall_rouge_sentence_match(): data = [example_datum.CAPITAL_OF_FRANCE, example_datum.IMPLICATIONS_GLOBAL_WARMING] expected_results = [ {"context_precision": 0.0, "context_recall": 0.0, "context_f1": 0.0}, {"context_precision": 0.09090909090909091, "context_recall": 0.5, "context_f1": 0.15384615384615385}, ] metric = PrecisionRecallF1(RougeSentenceMatch()) assert all(all_close(metric.calculate(**datum), expected) for datum, expected in zip(data, expected_results)) def test_ranked_retrieval_exact_chunk_match(): data = [example_datum.CAPITAL_OF_FRANCE, example_datum.ROMEO_AND_JULIET] expected_results = [ {"average_precision": 0, "reciprocal_rank": 0, "ndcg": 0.0}, {"average_precision": 1.0, "reciprocal_rank": 1.0, "ndcg": 1.0}, ] metric = RankedRetrievalMetrics(RougeChunkMatch()) assert all(all_close(metric.calculate(**datum), expected) for datum, expected in zip(data, expected_results)) def test_ranked_retrieval_exact_sentence_match(): with pytest.raises(AssertionError): RankedRetrievalMetrics(ExactSentenceMatch()) def test_llm_based_context_precision(): data = [example_datum.CAPITAL_OF_FRANCE, example_datum.ROMEO_AND_JULIET] metric = LLMBasedContextPrecision() assert all(in_zero_one(metric.calculate(**datum)) for datum in data) def test_llm_based_context_coverage_openai(): data = [example_datum.CAPITAL_OF_FRANCE, example_datum.ROMEO_AND_JULIET]
def test_precision_recall_exact_chunk_match(): data = [example_datum.CAPITAL_OF_FRANCE, example_datum.ROMEO_AND_JULIET] expected_results = [ {"context_precision": 0.0, "context_recall": 0.0, "context_f1": 0.0}, {"context_precision": 1.0, "context_recall": 1.0, "context_f1": 1.0}, ] metric = PrecisionRecallF1(RougeChunkMatch(threshold=0.7)) assert all(all_close(metric.calculate(**datum), expected) for datum, expected in zip(data, expected_results)) def test_precision_recall_exact_sentence_match(): data = [example_datum.ROMEO_AND_JULIET] expected_results = [{"context_precision": 1.0, "context_recall": 1.0, "context_f1": 1.0}] metric = PrecisionRecallF1(RougeSentenceMatch(threshold=0.8)) assert all(all_close(metric.calculate(**datum), expected) for datum, expected in zip(data, expected_results)) def test_precision_recall_rouge_sentence_match(): data = [example_datum.CAPITAL_OF_FRANCE, example_datum.IMPLICATIONS_GLOBAL_WARMING] expected_results = [ {"context_precision": 0.0, "context_recall": 0.0, "context_f1": 0.0}, {"context_precision": 0.09090909090909091, "context_recall": 0.5, "context_f1": 0.15384615384615385}, ] metric = PrecisionRecallF1(RougeSentenceMatch()) assert all(all_close(metric.calculate(**datum), expected) for datum, expected in zip(data, expected_results)) def test_ranked_retrieval_exact_chunk_match(): data = [example_datum.CAPITAL_OF_FRANCE, example_datum.ROMEO_AND_JULIET] expected_results = [ {"average_precision": 0, "reciprocal_rank": 0, "ndcg": 0.0}, {"average_precision": 1.0, "reciprocal_rank": 1.0, "ndcg": 1.0}, ] metric = RankedRetrievalMetrics(RougeChunkMatch()) assert all(all_close(metric.calculate(**datum), expected) for datum, expected in zip(data, expected_results)) def test_ranked_retrieval_exact_sentence_match(): with pytest.raises(AssertionError): RankedRetrievalMetrics(ExactSentenceMatch()) def test_llm_based_context_precision(): data = [example_datum.CAPITAL_OF_FRANCE, example_datum.ROMEO_AND_JULIET] metric = LLMBasedContextPrecision() assert all(in_zero_one(metric.calculate(**datum)) for datum in data) def test_llm_based_context_coverage_openai(): data = [example_datum.CAPITAL_OF_FRANCE, example_datum.ROMEO_AND_JULIET]
metric = LLMBasedContextCoverage(model=LLMFactory("gpt-3.5-turbo-1106"))
0
2023-12-08 21:30:39+00:00
8k
Seunggu0305/VLCounter
tools/models/VLCounter.py
[ { "identifier": "VPTCLIPVisionTransformer", "path": "tools/models/ViT_Encoder.py", "snippet": "class VPTCLIPVisionTransformer(nn.Module):\n def __init__(self, input_resolution=384, patch_size=16, width=768, layers=12, heads=12, output_dim=512, drop_path_rate=0.1, out_indices=[6,7,8,11], pretrained=None, get_embeddings=True, \n num_tokens=10, prompt_dim=768, total_d_layer=11, **kwargs):\n super().__init__()\n self.pretrained = pretrained\n self.input_resolution = input_resolution\n self.output_dim = output_dim\n self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)\n\n scale = width ** -0.5\n self.class_embedding = nn.Parameter(scale * torch.randn(width))\n self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))\n self.spatial_size = input_resolution // patch_size\n self.ln_pre = LayerNorm(width)\n self.get_embeddings = get_embeddings\n self.num_layers = layers\n\n self.transformer = Transformer(width, layers, heads, drop_path_rate=drop_path_rate)\n\n self.out_indices = out_indices\n\n if get_embeddings:\n self.ln_post = LayerNorm(width)\n self.proj = nn.Parameter(scale * torch.randn(width, output_dim))\n\n\n ## Setting of visual prompt tuning\n self.num_tokens = num_tokens \n self.prompt_dim = prompt_dim\n self.total_d_layer = total_d_layer\n\n ## Add the prompt parameters # exclude_key=prompt:\n self._init_prompt(patch_size, self.num_tokens, self.prompt_dim, self.total_d_layer)\n\n self.embed_dim = width\n self.num_heads = heads\n self.patch_size = patch_size\n \n def _init_prompt(self, patch, num_tokens, prompt_dim, total_d_layer):\n patch_size = []\n patch_size.append(patch)\n patch_size.append(patch)\n val = math.sqrt(6. / float(3 * reduce(mul, patch_size, 1) + prompt_dim)) # noqa\n\n if total_d_layer >= 0:\n self.prompt_embeddings = nn.Parameter(torch.zeros(1, num_tokens, prompt_dim))\n # xavier_uniform initialization\n nn.init.uniform_(self.prompt_embeddings.data, -val, val)\n\n if total_d_layer > 0: # noqa\n self.deep_prompt_embeddings = nn.Parameter(torch.zeros(total_d_layer, num_tokens, prompt_dim))\n # xavier_uniform initialization\n nn.init.uniform_(self.deep_prompt_embeddings.data, -val, val)\n\n self.prompt_proj = nn.Linear(prompt_dim, prompt_dim)\n nn.init.kaiming_normal_(self.prompt_proj.weight, a=0, mode='fan_out') \n self.prompt_norm = LayerNorm(prompt_dim, eps=1e-6)\n self.prompt_dropout = Dropout(0.1)\n\n else: # total_d_layer < 0\n self.deep_prompt_embeddings = nn.Parameter(torch.zeros(abs(total_d_layer), num_tokens, prompt_dim))\n nn.init.uniform_(self.deep_prompt_embeddings.data, -val, val)\n self.prompt_proj = nn.Linear(prompt_dim, prompt_dim)\n nn.init.kaiming_normal_(self.prompt_proj.weight, a=0, mode='fan_out') \n self.prompt_norm = LayerNorm(prompt_dim, eps=1e-6)\n self.prompt_dropout = Dropout(0.1)\n \n\n def init_weights(self, pretrained=None):\n pretrained = pretrained or self.pretrained\n if isinstance(pretrained, str):\n checkpoint = torch.jit.load(pretrained, map_location='cpu').float().state_dict()\n\n state_dict = {}\n\n for k in checkpoint.keys():\n if k.startswith('visual.'):\n new_k = k.replace('visual.', '')\n state_dict[new_k] = checkpoint[k]\n\n if 'positional_embedding' in state_dict.keys():\n if self.positional_embedding.shape != state_dict['positional_embedding'].shape:\n # (1025, 768) (197, 768) \n print(f'Resize the pos_embed shape from {state_dict[\"positional_embedding\"].shape} to {self.positional_embedding.shape}')\n cls_pos 
= state_dict[\"positional_embedding\"][0:1, :]\n \n spatial_pos = F.interpolate(state_dict[\"positional_embedding\"][1:,].reshape(1, 14, 14, 768).permute(0, 3, 1, 2), size=(self.spatial_size, self.spatial_size), mode='bilinear')\n spatial_pos = spatial_pos.reshape(768, self.spatial_size*self.spatial_size).permute(1, 0)\n positional_embedding = torch.cat([cls_pos, spatial_pos], dim=0)\n state_dict['positional_embedding'] = positional_embedding\n assert self.positional_embedding.shape == state_dict['positional_embedding'].shape\n\n u, w = self.load_state_dict(state_dict, False)\n print(u, w, 'are misaligned params in vision transformer')\n\n self.attn = None\n if self.attn == None:\n for i in range(1,2): # surgery 7, maskclip 2\n self.attn = Attention(self.embed_dim, self.embed_dim, self.num_heads, True)\n self.attn.qkv.weight.data = self.transformer.resblocks[-i].attn.in_proj_weight.clone()\n self.attn.qkv.bias.data = self.transformer.resblocks[-i].attn.in_proj_bias.clone()\n self.attn.proj.weight.data = self.transformer.resblocks[-i].attn.out_proj.weight.clone()\n self.attn.proj.bias.data = self.transformer.resblocks[-i].attn.out_proj.bias.clone()\n self.transformer.resblocks[-i].attn = self.attn\n\n def forward(self, x: torch.Tensor):\n x = self.conv1(x)\n B, C, H, W = x.shape\n x = x.reshape(x.shape[0], x.shape[1], -1)\n x = x.permute(0, 2, 1)\n x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1)\n\n pos = self.positional_embedding.to(x.dtype)\n cls_pos = pos[0,:] + self.class_embedding.to(x.dtype)\n spatial_pos = F.interpolate(pos[1:,].reshape(1, self.spatial_size, self.spatial_size, C).permute(0, 3, 1, 2), size=(H, W), mode='bilinear')\n spatial_pos = spatial_pos.reshape(1, C, H*W).permute(0, 2, 1)\n pos = torch.cat([cls_pos.reshape(1, 1, C), spatial_pos], dim=1)\n x = x + pos\n x = self.ln_pre(x)\n\n if self.total_d_layer >=0:\n # concat prompt\n x = torch.cat((\n x[:, :1, :],\n self.prompt_dropout(self.prompt_proj(self.prompt_embeddings).expand(B, -1, -1)),\n x[:, 1:, :]\n ), dim=1)\n\n x = x.permute(1, 0, 2)\n\n features = []\n outs = []\n x, features = self.forward_deep_prompt(x, features, H, W)\n\n if self.get_embeddings:\n x = x.permute(1, 0, 2)\n x = self.ln_post(x)\n x = x @ self.proj\n\n global_embedding = x[:, 0]\n visual_embedding = x[:, -(H*W):].reshape(B, H, W, -1).permute(0, 3, 1, 2)\n\n visual_embedding = visual_embedding / visual_embedding.norm(dim=1, keepdim=True)\n features.append(visual_embedding)\n\n outs.append(tuple(features))\n global_embedding = global_embedding / global_embedding.norm(dim=1, keepdim=True)\n outs.append(global_embedding)\n\n return outs[0]\n\n\n def forward_deep_prompt(self, embedding_output, features, H, W, out_last=False):\n B = embedding_output.shape[1]\n\n for i in range(self.num_layers):\n if i == 0:\n hidden_states = self.transformer.resblocks[i](embedding_output)\n elif i <= self.deep_prompt_embeddings.shape[0]:\n deep_prompt_emb = self.prompt_dropout(self.prompt_proj(self.deep_prompt_embeddings[i-1]).expand(B, -1, -1)).permute(1, 0, 2)\n hidden_states = torch.cat((\n hidden_states[:1, :, :],\n deep_prompt_emb,\n hidden_states[(1+self.num_tokens):, :, :]\n ), dim=0)\n\n hidden_states = self.transformer.resblocks[i](hidden_states)\n \n if len(self.out_indices) > 1:\n if i in self.out_indices[:-1]:\n xp = hidden_states.permute(1, 0, 2)[:, -(H*W):, :].permute(0, 2, 1).reshape(B, -1, H, W)\n features.append(xp.contiguous() / xp.norm(dim=1,keepdim=True))\n\n 
encoded = self.prompt_norm(hidden_states)\n return encoded, features" }, { "identifier": "SPTCLIPVisionTransformer", "path": "tools/models/ViT_Encoder_add.py", "snippet": "class SPTCLIPVisionTransformer(nn.Module):\n def __init__(self, input_resolution=384, patch_size=16, width=768, layers=12, heads=12, output_dim=512, drop_path_rate=0.1, out_indices=[5,6,7,8,11], pretrained=None, get_embeddings=True, \n num_tokens=10, prompt_dim=768, total_d_layer=11, **kwargs):\n super().__init__()\n self.pretrained = pretrained\n self.input_resolution = input_resolution\n self.output_dim = output_dim\n self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)\n\n scale = width ** -0.5\n self.class_embedding = nn.Parameter(scale * torch.randn(width))\n self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))\n self.spatial_size = input_resolution // patch_size\n self.ln_pre = LayerNorm(width)\n self.get_embeddings = get_embeddings\n self.num_layers = layers\n\n self.transformer = Transformer(width, layers, heads, drop_path_rate=drop_path_rate)\n\n self.out_indices = out_indices\n\n if get_embeddings:\n self.ln_post = LayerNorm(width)\n self.proj = nn.Parameter(scale * torch.randn(width, output_dim))\n\n self.text_proj = nn.Linear(512, width)\n nn.init.kaiming_normal_(self.text_proj.weight, a=0, mode='fan_out') \n self.text_dropout = nn.Dropout(0.1)\n ## Setting of visual prompt tuning\n self.num_tokens = num_tokens \n self.prompt_dim = prompt_dim\n self.total_d_layer = total_d_layer\n\n\n ## Add the prompt parameters # exclude_key=prompt:\n self._init_prompt(patch_size, self.num_tokens, self.prompt_dim, self.total_d_layer)\n\n self.embed_dim = width\n self.num_heads = heads\n self.patch_size = patch_size\n \n \n def _init_prompt(self, patch, num_tokens, prompt_dim, total_d_layer):\n patch_size = []\n patch_size.append(patch)\n patch_size.append(patch)\n val = math.sqrt(6. 
/ float(3 * reduce(mul, patch_size, 1) + prompt_dim)) # noqa\n\n if total_d_layer >= 0:\n self.prompt_embeddings = nn.Parameter(torch.zeros(1, num_tokens, prompt_dim))\n # xavier_uniform initialization\n nn.init.uniform_(self.prompt_embeddings.data, -val, val)\n\n if total_d_layer > 0: # noqa\n self.deep_prompt_embeddings = nn.Parameter(torch.zeros(total_d_layer, num_tokens, prompt_dim))\n # xavier_uniform initialization\n nn.init.uniform_(self.deep_prompt_embeddings.data, -val, val)\n\n self.prompt_proj = nn.Linear(prompt_dim, prompt_dim)\n nn.init.kaiming_normal_(self.prompt_proj.weight, a=0, mode='fan_out') \n self.prompt_norm = LayerNorm(prompt_dim, eps=1e-6)\n self.prompt_dropout = Dropout(0.1)\n\n else: # total_d_layer < 0\n self.deep_prompt_embeddings = nn.Parameter(torch.zeros(abs(total_d_layer), num_tokens, prompt_dim))\n nn.init.uniform_(self.deep_prompt_embeddings.data, -val, val)\n self.prompt_proj = nn.Linear(prompt_dim, prompt_dim)\n nn.init.kaiming_normal_(self.prompt_proj.weight, a=0, mode='fan_out') \n self.prompt_norm = LayerNorm(prompt_dim, eps=1e-6)\n self.prompt_dropout = Dropout(0.1)\n \n\n def init_weights(self, pretrained=None):\n pretrained = pretrained or self.pretrained\n if isinstance(pretrained, str):\n checkpoint = torch.jit.load(pretrained, map_location='cpu').float().state_dict()\n # checkpoint = torch.load(pretrained)['model']\n\n state_dict = {}\n\n for k in checkpoint.keys():\n if k.startswith('visual.'):\n new_k = k.replace('visual.', '')\n # if k.startswith('module.visual.'):\n # new_k = k.replace('module.visual.', '')\n state_dict[new_k] = checkpoint[k].float()\n\n if 'positional_embedding' in state_dict.keys():\n if self.positional_embedding.shape != state_dict['positional_embedding'].shape:\n # (1025, 768) (197, 768) \n print(f'Resize the pos_embed shape from {state_dict[\"positional_embedding\"].shape} to {self.positional_embedding.shape}')\n cls_pos = state_dict[\"positional_embedding\"][0:1, :]\n if self.patch_size == 16:\n spatial_pos = F.interpolate(state_dict[\"positional_embedding\"][1:,].reshape(1, 14, 14, 768).permute(0, 3, 1, 2), size=(self.spatial_size, self.spatial_size), mode='bilinear')\n elif self.patch_size == 32:\n spatial_pos = F.interpolate(state_dict[\"positional_embedding\"][1:,].reshape(1, 7, 7, 768).permute(0, 3, 1, 2), size=(self.spatial_size, self.spatial_size), mode='bilinear')\n else:\n assert AttributeError('Patch Size should be 16 or 32')\n spatial_pos = spatial_pos.reshape(768, self.spatial_size*self.spatial_size).permute(1, 0)\n positional_embedding = torch.cat([cls_pos, spatial_pos], dim=0)\n state_dict['positional_embedding'] = positional_embedding\n assert self.positional_embedding.shape == state_dict['positional_embedding'].shape\n \n # del state_dict['conv1.weight']\n u, w = self.load_state_dict(state_dict, False)\n print(u, w, 'are misaligned params in vision transformer')\n\n self.attn = None\n if self.attn == None:\n for i in range(1,2): # surgery 7, maskclip 2\n self.attn = Attention(self.embed_dim, self.embed_dim, self.num_heads, True)\n self.attn.qkv.weight.data = self.transformer.resblocks[-i].attn.in_proj_weight.clone()\n self.attn.qkv.bias.data = self.transformer.resblocks[-i].attn.in_proj_bias.clone()\n self.attn.proj.weight.data = self.transformer.resblocks[-i].attn.out_proj.weight.clone()\n self.attn.proj.bias.data = self.transformer.resblocks[-i].attn.out_proj.bias.clone()\n self.transformer.resblocks[-i].attn = self.attn\n\n def forward(self, x: torch.Tensor, _t: torch.Tensor):\n x = self.conv1(x)\n 
B, C, H, W = x.shape\n x = x.reshape(x.shape[0], x.shape[1], -1)\n x = x.permute(0, 2, 1)\n x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1)\n\n pos = self.positional_embedding.to(x.dtype)\n cls_pos = pos[0,:] + self.class_embedding.to(x.dtype)\n spatial_pos = F.interpolate(pos[1:,].reshape(1, self.spatial_size, self.spatial_size, C).permute(0, 3, 1, 2), size=(H, W), mode='bilinear')\n spatial_pos = spatial_pos.reshape(1, C, H*W).permute(0, 2, 1)\n pos = torch.cat([cls_pos.reshape(1, 1, C), spatial_pos], dim=1)\n x = x + pos\n x = self.ln_pre(x)\n \n if self.total_d_layer >=0:\n # concat prompt\n x = torch.cat((\n x[:, :1, :],\n self.prompt_dropout(self.prompt_proj(self.prompt_embeddings).expand(B, -1, -1)) + self.text_dropout(self.text_proj(_t).expand(-1, self.num_tokens, -1)),\n x[:, 1:, :]\n ), dim=1)\n\n x = x.permute(1, 0, 2)\n\n features = []\n outs = []\n x, features = self.forward_deep_prompt(x, features, H, W, _t)\n\n if self.get_embeddings:\n x = x.permute(1, 0, 2)\n x = self.ln_post(x)\n x = x @ self.proj\n\n global_embedding = x[:, 0]\n visual_embedding = x[:, -(H*W):].reshape(B, H, W, -1).permute(0, 3, 1, 2)\n\n visual_embedding = visual_embedding / visual_embedding.norm(dim=1, keepdim=True)\n features.append(visual_embedding)\n\n outs.append(tuple(features))\n global_embedding = global_embedding / global_embedding.norm(dim=1, keepdim=True)\n outs.append(global_embedding)\n\n return outs[0]\n\n\n def forward_deep_prompt(self, embedding_output, features, H, W, _t, out_last=False):\n B = embedding_output.shape[1]\n\n for i in range(self.num_layers):\n if i == 0:\n hidden_states = self.transformer.resblocks[i](embedding_output)\n elif i <= self.deep_prompt_embeddings.shape[0]:\n deep_prompt_emb = self.prompt_dropout(self.prompt_proj(self.deep_prompt_embeddings[i-1]).expand(B, -1, -1)).permute(1, 0, 2)\n deep_text = self.text_dropout(self.text_proj(_t).expand(-1,self.num_tokens,-1)).permute(1,0,2)\n hidden_states = torch.cat((\n hidden_states[:1, :, :],\n deep_prompt_emb + deep_text,\n hidden_states[(1+self.num_tokens):, :, :]\n ), dim=0)\n\n hidden_states = self.transformer.resblocks[i](hidden_states)\n else:\n hidden_states = self.transformer.resblocks[i](hidden_states)\n \n if len(self.out_indices) > 1:\n if i in self.out_indices[:-1]:\n xp = hidden_states.permute(1, 0, 2)[:, -(H*W):, :].permute(0, 2, 1).reshape(B, -1, H, W)\n features.append(xp.contiguous() / xp.norm(dim=1,keepdim=True))\n\n encoded = self.prompt_norm(hidden_states)\n return encoded, features" }, { "identifier": "CLIPTextEncoder", "path": "tools/models/Text_Encoder.py", "snippet": "class CLIPTextEncoder(nn.Module):\n def __init__(self, context_length=77,\n vocab_size=49408,\n # vocab_size=49408+1,\n transformer_width=512,\n transformer_heads=8,\n transformer_layers=12,\n embed_dim=512,\n out_dim=256,\n pretrained=None, **kwargs):\n super().__init__()\n\n self.pretrained = pretrained\n\n self.context_length = context_length\n\n self.transformer = Transformer(\n width=transformer_width,\n layers=transformer_layers,\n heads=transformer_heads,\n attn_mask=self.build_attention_mask()\n )\n\n self.vocab_size = vocab_size\n self.token_embedding = nn.Embedding(vocab_size, transformer_width)\n self.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width))\n self.ln_final = LayerNorm(transformer_width)\n self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim))\n # self.text_projection 
= nn.Linear(transformer_width, embed_dim)\n \n def init_weights(self, pretrained=None):\n pretrained = pretrained or self.pretrained\n if isinstance(pretrained, str):\n checkpoint = torch.jit.load(pretrained, map_location='cpu').float().state_dict()\n # checkpoint = torch.load(pretrained)['model']\n\n state_dict = {}\n\n for k in checkpoint.keys():\n if k.startswith('transformer.'):\n # if k.startswith('module.encode_text.transformer.'):\n # new_k = k.replace('module.encode_text.', '')\n # state_dict[new_k] = checkpoint[k].float()\n state_dict[k] = checkpoint[k].float()\n \n if k == 'positional_embedding' or k == 'text_projection' or k.startswith('token_embedding') or k.startswith('ln_final'):\n # if k == 'module.encode_text.positional_embedding' or k.startswith('module.encode_text.text_projection') or k.startswith('module.encode_text.token_embedding') or k.startswith('module.encode_text.ln_final'):\n # new_k = k.replace('module.encode_text.', '')\n # if new_k == 'positional_embedding' and checkpoint[k].size(0) > self.context_length:\n if k == 'positional_embedding' and checkpoint[k].size(0) > self.context_length:\n checkpoint[k] = checkpoint[k][:self.context_length]\n print('positional_embedding is tuncated from 77 to', self.context_length)\n # state_dict[new_k] = checkpoint[k]\n state_dict[k] = checkpoint[k]\n \n u, w = self.load_state_dict(state_dict, False)\n if u != [] or w != [] :\n print(u, w, 'are misaligned params in text encoder')\n\n\n def build_attention_mask(self):\n # lazily create causal attention mask, with full attention between the vision tokens\n # pytorch uses additive attention mask; fill with -inf\n mask = torch.empty(self.context_length, self.context_length)\n mask.fill_(float(\"-inf\"))\n mask.triu_(1) # zero out the lower diagonal\n return mask\n\n def forward(self, text):\n x = self.token_embedding(text)\n x = x + self.positional_embedding \n x = x.permute(1, 0, 2)\n x = self.transformer(x)\n x = x.permute(1, 0, 2)\n x = self.ln_final(x)\n x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection\n # x = self.text_projection(x[torch.arange(x.shape[0]), text.argmax(dim=-1)])\n return x" } ]
import math import pickle import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from .ViT_Encoder import VPTCLIPVisionTransformer as vpt from .ViT_Encoder_add import SPTCLIPVisionTransformer as spt from .Text_Encoder import CLIPTextEncoder from timm.models.layers import trunc_normal_
6,434
def trunc_normal_init(module: nn.Module, mean: float = 0, std: float = 1, a: float = -2, b: float = 2, bias: float = 0) -> None: if hasattr(module, 'weight') and module.weight is not None: trunc_normal_(module.weight, mean, std, a, b) # type: ignore if hasattr(module, 'bias') and module.bias is not None: nn.init.constant_(module.bias, bias) # type: ignore def constant_init(module, val, bias=0): if hasattr(module, 'weight') and module.weight is not None: nn.init.constant_(module.weight, val) if hasattr(module, 'bias') and module.bias is not None: nn.init.constant_(module.bias, bias) class UpConv(nn.Module): def __init__(self, in_channels, out_channels, kernel, padding=0, flag=True): super(UpConv, self).__init__() self.conv = nn.Conv2d(in_channels, out_channels, kernel, padding=padding) if flag: self.gn = nn.GroupNorm(8, out_channels) self.gelu = nn.GELU() self.up = nn.UpsamplingBilinear2d(scale_factor=2) self.flag = flag def forward(self, trg): trg = self.conv(trg) if self.flag: trg = self.up(self.gelu(self.gn(trg))) return trg class Counter(nn.Module): def __init__(self, args): super(Counter,self).__init__() self.v = args.v self.enc = args.enc embed_dims = 512 proj_dims = 64 self.t_proj = nn.Linear(embed_dims, proj_dims) self.v_proj = nn.Linear(embed_dims, proj_dims) self.proj = nn.Sequential( nn.Conv2d(768, proj_dims, 1), nn.GroupNorm(8, proj_dims), nn.GELU(), nn.UpsamplingBilinear2d(scale_factor=2) ) self.proj1 = nn.Sequential( nn.Conv2d(768, proj_dims, 1), nn.GroupNorm(8, proj_dims), nn.GELU(), nn.UpsamplingBilinear2d(scale_factor=4) ) self.proj2 = nn.Sequential( nn.Conv2d(768, proj_dims, 1), nn.GroupNorm(8, proj_dims), nn.GELU(), nn.UpsamplingBilinear2d(scale_factor=8) ) self.decoder = nn.ModuleList([ UpConv(proj_dims+1, proj_dims, 3, 1), UpConv(proj_dims, proj_dims, 3,1), UpConv(proj_dims, proj_dims, 3, 1), UpConv(proj_dims, proj_dims, 3,1), UpConv(proj_dims, 1, 1, flag=False) ]) self.attn_weight = nn.Parameter(torch.ones(1, 1, 24, 24)) self.attn_bias = nn.Parameter(torch.zeros(1, 1, 24, 24)) self.init_weights() if args.enc == "spt": self.v_enc = spt(pretrained=args.MODEL.pretrain+'ViT-B-16.pt', num_tokens=args.num_tokens, patch_size=args.patch_size) self.v_enc.init_weights() elif args.enc == "vpt":
def trunc_normal_init(module: nn.Module, mean: float = 0, std: float = 1, a: float = -2, b: float = 2, bias: float = 0) -> None: if hasattr(module, 'weight') and module.weight is not None: trunc_normal_(module.weight, mean, std, a, b) # type: ignore if hasattr(module, 'bias') and module.bias is not None: nn.init.constant_(module.bias, bias) # type: ignore def constant_init(module, val, bias=0): if hasattr(module, 'weight') and module.weight is not None: nn.init.constant_(module.weight, val) if hasattr(module, 'bias') and module.bias is not None: nn.init.constant_(module.bias, bias) class UpConv(nn.Module): def __init__(self, in_channels, out_channels, kernel, padding=0, flag=True): super(UpConv, self).__init__() self.conv = nn.Conv2d(in_channels, out_channels, kernel, padding=padding) if flag: self.gn = nn.GroupNorm(8, out_channels) self.gelu = nn.GELU() self.up = nn.UpsamplingBilinear2d(scale_factor=2) self.flag = flag def forward(self, trg): trg = self.conv(trg) if self.flag: trg = self.up(self.gelu(self.gn(trg))) return trg class Counter(nn.Module): def __init__(self, args): super(Counter,self).__init__() self.v = args.v self.enc = args.enc embed_dims = 512 proj_dims = 64 self.t_proj = nn.Linear(embed_dims, proj_dims) self.v_proj = nn.Linear(embed_dims, proj_dims) self.proj = nn.Sequential( nn.Conv2d(768, proj_dims, 1), nn.GroupNorm(8, proj_dims), nn.GELU(), nn.UpsamplingBilinear2d(scale_factor=2) ) self.proj1 = nn.Sequential( nn.Conv2d(768, proj_dims, 1), nn.GroupNorm(8, proj_dims), nn.GELU(), nn.UpsamplingBilinear2d(scale_factor=4) ) self.proj2 = nn.Sequential( nn.Conv2d(768, proj_dims, 1), nn.GroupNorm(8, proj_dims), nn.GELU(), nn.UpsamplingBilinear2d(scale_factor=8) ) self.decoder = nn.ModuleList([ UpConv(proj_dims+1, proj_dims, 3, 1), UpConv(proj_dims, proj_dims, 3,1), UpConv(proj_dims, proj_dims, 3, 1), UpConv(proj_dims, proj_dims, 3,1), UpConv(proj_dims, 1, 1, flag=False) ]) self.attn_weight = nn.Parameter(torch.ones(1, 1, 24, 24)) self.attn_bias = nn.Parameter(torch.zeros(1, 1, 24, 24)) self.init_weights() if args.enc == "spt": self.v_enc = spt(pretrained=args.MODEL.pretrain+'ViT-B-16.pt', num_tokens=args.num_tokens, patch_size=args.patch_size) self.v_enc.init_weights() elif args.enc == "vpt":
self.v_enc = vpt(pretrained=args.MODEL.pretrain+'ViT-B-16.pt')
1
2023-12-13 08:00:28+00:00
8k
qitan/devops-backend-lite
apps/dashboard/views.py
[ { "identifier": "Search", "path": "common/utils/ElasticSearchAPI.py", "snippet": "class Search(BaseSearch):\n def __init__(self, prefix=False, **kwargs):\n if kwargs.get('index', None) and prefix:\n if isinstance(kwargs['index'], string_types):\n kwargs['index'] = f\"{ELASTICSEARCH_PREFIX}{kwargs['index']}\"\n elif isinstance(kwargs['index'], list):\n kwargs['index'] = [\n f\"{ELASTICSEARCH_PREFIX}{i}\" for i in kwargs['index']]\n elif isinstance(kwargs['index'], tuple):\n kwargs['index'] = tuple(\n f\"{ELASTICSEARCH_PREFIX}{i}\" for i in kwargs['index'])\n else:\n raise Exception('索引名称格式错误!')\n super(Search, self).__init__(**kwargs)" }, { "identifier": "Product", "path": "dbapp/model/model_cmdb.py", "snippet": "class Product(TimeAbstract, CommonParent):\n name = models.CharField(max_length=100, unique=True, verbose_name='产品')\n alias = models.CharField(max_length=128, default='', verbose_name='产品别名')\n region = models.ForeignKey(\n Region, blank=True, null=True, on_delete=models.PROTECT, verbose_name='区域')\n desc = models.TextField(verbose_name='详情描述', null=True, blank=True)\n prefix = models.CharField(\n max_length=100, null=True, blank=True, verbose_name='前缀')\n managers = models.JSONField(default=dict, verbose_name='负责人',\n help_text='存储格式 对象: {\"product\": userid, \"develop\": userid};product: 产品负责人, develop: 技术负责人;值为int类型,存储用户ID.')\n\n def __str__(self):\n return self.name\n\n class ExtMeta:\n related = True\n dashboard = True\n icon = 'asset4'\n\n class Meta:\n db_table = 'cmdb_product'\n verbose_name = '产品'\n verbose_name_plural = verbose_name + '管理'" }, { "identifier": "MicroApp", "path": "dbapp/model/model_cmdb.py", "snippet": "class MicroApp(TimeAbstract):\n appid = models.CharField(max_length=250, db_index=True, unique=True, verbose_name='应用ID',\n help_text='应用唯一标识,无需填写')\n name = models.CharField(max_length=128, verbose_name='应用')\n alias = models.CharField(max_length=128, blank=True, verbose_name='别名')\n project = models.ForeignKey(\n Project, on_delete=models.PROTECT, null=True, blank=True, verbose_name='项目')\n creator = models.ForeignKey(UserProfile, on_delete=models.PROTECT, null=True, blank=True, verbose_name='应用创建人',\n help_text='前端不需要传递')\n repo = models.JSONField(default=dict, verbose_name='仓库地址',\n help_text='{\"id\": id, \"name\": name, \"http_url_to_repo\": url}')\n target = models.JSONField(default=get_default_value, verbose_name='JAR包配置',\n help_text='默认:default, {\"default\": \"default\", \"custom\": \"xxx/a.war\"}')\n team_members = models.JSONField(\n default=get_default_team_members, verbose_name=\"团队成员组\")\n category = models.CharField(\n max_length=128, blank=True, null=True, verbose_name='应用分类')\n template = models.JSONField(default=dict, verbose_name='应用配置',\n help_text='从数据字典接口获取,对应项的key为TEMPLATE, 数据格式为对象.\\n对应项的extra属性.\\n参数说明:\\nstrategy: 策略配置\\n - replicas: 副本, integer\\n - revisionHistoryLimit: 保留副本, integer\\n - minReadySeconds: 更新等待时间, integer\\n - maxSurge/maxUnavailable: 比例缩放 \\n\\nresources: 资源配额\\n - limits.cpu: CPU限制\\n - limits.memory: 内存限制\\n - requests.cpu: CPU请求\\n - requests.memory: 内存请求 \\n\\nenv: 环境变量, 数组[{\"name\": \"env1\", \"value\": \"value1\"}]\\n\\ncommand: 启动命令, 字符串')\n language = models.CharField(\n max_length=32, default='java', verbose_name='开发语言')\n build_command = models.CharField(max_length=250, blank=True, null=True, verbose_name='构建命令',\n help_text='根据应用开发语言, 从getKey(\"LANGUAGE\")获取数据, 取出extra字段的build值')\n multiple_app = models.BooleanField(\n default=False, blank=True, verbose_name='多应用标志')\n multiple_ids = 
models.JSONField(default=list, verbose_name='多应用关联ID列表')\n dockerfile = models.JSONField(default=get_default_value, verbose_name='Dockerfile配置',\n help_text='默认:{default: null}, 可选: {\"default|默认\": null, \"project|使用项目Dockerfile\": \"project\", \"custom|自定义Dockerfile\": \"\"}')\n online = models.BooleanField(default=True, blank=True, verbose_name='上线下线',\n help_text='应用上线/下线状态标记, 下线状态的应用禁止发布.')\n desc = models.TextField(verbose_name='描述', null=True, blank=True)\n notify = models.JSONField(default=dict, verbose_name='消息通知')\n can_edit = models.JSONField(default=list, verbose_name='管理人员',\n help_text='有权限编辑该应用的人员ID\\n格式为数组, 如[1,2]')\n is_k8s = models.CharField(max_length=8, default='k8s', choices=G_DEPLOY_TYPE, verbose_name='部署方式',\n help_text=f'默认k8s, 可选: {dict(G_DEPLOY_TYPE)}')\n modules = models.JSONField(default=list, verbose_name='工程模块')\n\n def __str__(self):\n return '[%s]%s' % (self.name, self.alias)\n\n class ExtMeta:\n related = True\n dashboard = True\n icon = 'component'\n\n class Meta:\n db_table = 'cmdb_microapp'\n default_permissions = ()\n ordering = ['-created_time']\n verbose_name = '应用'\n verbose_name_plural = verbose_name + '管理'" }, { "identifier": "get_datadict", "path": "common/ext_fun.py", "snippet": "def get_datadict(name, config=0, default_value=None):\n \"\"\"\n 从数据字典获取数据\n \"\"\"\n try:\n qs = DataDict.objects.get(key=name)\n except BaseException as e:\n return default_value\n if config:\n ret = json.loads(qs.extra)\n else:\n ret = {'id': qs.id, 'key': qs.key,\n 'value': qs.value, 'desc': qs.desc}\n return ret" }, { "identifier": "get_time_range", "path": "common/ext_fun.py", "snippet": "def get_time_range(request):\n \"\"\"\n 获取时间轴\n \"\"\"\n type_range = request.query_params.get('range_type', 'static')\n if type_range == 'static':\n time_range = request.query_params.get('range', '6-months')\n else:\n time_range = request.query_params.getlist('range[]', None)\n if not time_range:\n time_range = '6-months'\n period = time_period(time_range, type_range)\n time_line = timeline_generate(period, format_type='cmdb')\n # 时间刻度, 以小时为刻度则删除年份\n time_line_x = [i.split(' ')[-1]\n for i in time_line] if period['name'] == 'hours' else time_line\n return period, time_line, time_line_x" }, { "identifier": "CustomModelViewSet", "path": "common/extends/viewsets.py", "snippet": "class CustomModelViewSet(viewsets.ModelViewSet):\n \"\"\"\n A viewset that provides default `create()`, `retrieve()`, `update()`,\n `partial_update()`, `destroy()` and `list()` actions.\n \"\"\"\n\n def get_permission_from_role(self, request):\n try:\n perms = request.user.roles.values(\n 'permissions__method',\n ).distinct()\n return [p['permissions__method'] for p in perms]\n except AttributeError:\n return []\n\n def extend_filter(self, queryset):\n return queryset\n\n def get_queryset(self):\n \"\"\"\n Get the list of items for this view.\n This must be an iterable, and may be a queryset.\n Defaults to using `self.queryset`.\n\n This method should always be used rather than accessing `self.queryset`\n directly, as `self.queryset` gets evaluated only once, and those results\n are cached for all subsequent requests.\n\n You may want to override this if you need to provide different\n querysets depending on the incoming request.\n\n (Eg. 
return a list of items that is specific to the user)\n \"\"\"\n assert self.queryset is not None, (\n \"'%s' should either include a `queryset` attribute, \"\n \"or override the `get_queryset()` method.\"\n % self.__class__.__name__\n )\n queryset = self.extend_filter(self.queryset)\n if isinstance(queryset, QuerySet):\n # Ensure queryset is re-evaluated on each request.\n queryset = queryset.all()\n return queryset.distinct()\n\n @action(methods=['GET'], url_path='count', detail=False)\n def count(self, request, *args, **kwargs):\n queryset = self.get_queryset()\n return Response({'code': 20000, 'data': queryset.count()})\n\n def create(self, request, *args, **kwargs):\n try:\n request.data['name'] = request.data['name'].strip(\n ' ').replace(' ', '-')\n except BaseException as e:\n print('exception ', str(e))\n serializer = self.get_serializer(data=request.data)\n if not serializer.is_valid():\n return Response({'code': 40000, 'status': 'failed', 'message': serializer.errors})\n try:\n self.perform_create(serializer)\n except BaseException as e:\n return Response({'code': 50000, 'status': 'failed', 'message': str(e)})\n log_audit(request, action_type=self.serializer_class.Meta.model.__name__, action='创建', content='',\n data=serializer.data)\n\n data = {'data': serializer.data, 'status': 'success', 'code': 20000}\n return Response(data)\n\n def list(self, request, pk=None, *args, **kwargs):\n queryset = self.filter_queryset(self.get_queryset())\n page_size = request.query_params.get('page_size')\n pagination.PageNumberPagination.page_size = page_size\n page = self.paginate_queryset(queryset)\n if page is not None:\n serializer = self.get_serializer(page, many=True)\n return self.get_paginated_response(serializer.data)\n serializer = self.get_serializer(queryset, many=True)\n data = {'data': {'total': queryset.count(), 'items': serializer.data},\n 'code': 20000, 'status': 'success'}\n return Response(data)\n\n def update(self, request, *args, **kwargs):\n instance = self.get_object()\n partial = kwargs.pop('partial', False)\n try:\n request.data['name'] = request.data['name'].strip(\n ' ').replace(' ', '-')\n except BaseException as e:\n logger.warning(f'不包含name字段: {str(e)}')\n serializer = self.get_serializer(\n instance, data=request.data, partial=partial)\n if not serializer.is_valid():\n return Response({'code': 40000, 'status': 'failed', 'message': str(serializer.errors)})\n try:\n self.perform_update(serializer)\n except BaseException as e:\n logger.exception(f'更新失败,原因:{e}')\n return Response({'code': 50000, 'status': 'failed', 'message': str(e)})\n\n if getattr(instance, '_prefetched_objects_cache', None):\n # If 'prefetch_related' has been applied to a queryset, we need to\n # forcibly invalidate the prefetch cache on the instance.\n instance._prefetched_objects_cache = {}\n\n log_audit(request, self.serializer_class.Meta.model.__name__, '更新', content=f\"更新对象:{instance}\",\n data=serializer.data, old_data=self.serializer_class(instance).data)\n\n data = {'data': serializer.data, 'status': 'success', 'code': 20000}\n return Response(data)\n\n def retrieve(self, request, *args, **kwargs):\n instance = self.get_object()\n serializer = self.get_serializer(instance)\n data = {'data': serializer.data, 'code': 20000, 'status': 'success'}\n return Response(data)\n\n def destroy(self, request, *args, **kwargs):\n \"\"\"\n TODO: 删除操作物理删除 or 逻辑删除(增加删除标记字段)\n \"\"\"\n instance = self.get_object()\n try:\n self.perform_destroy(instance)\n except ProtectedError:\n # 存在关联数据,不可删除\n return 
Response({'code': 50000, 'status': 'failed', 'message': '存在关联数据,禁止删除!'})\n except BaseException as e:\n logger.exception(f'删除数据发生错误 {e}, {e.__class__}')\n return Response({'code': 50000, 'status': 'failed', 'message': f'删除异常: {str(e)}'})\n log_audit(request, self.serializer_class.Meta.model.__name__,\n '删除', content=f\"删除对象:{instance}\")\n\n return Response({'code': 20000, 'status': 'success', 'msg': ''})" }, { "identifier": "CMDB_RELATED_TYPE", "path": "common/variables.py", "snippet": "CMDB_RELATED_TYPE = (\n (0, '无关联'),\n (1, '关系型数据库表关联'),\n (2, 'ElasticSearch索引关联')\n)" }, { "identifier": "DASHBOARD_CONFIG", "path": "common/variables.py", "snippet": "DASHBOARD_CONFIG = {\n 'cmdb': [\n {\"key\": \"区域\", \"value\": \"cmdb.product\", \"type\": \"rds\"},\n {\"key\": \"项目\", \"value\": \"cmdb.project\", \"type\": \"rds\"},\n {\"key\": \"应用\", \"value\": \"cmdb.microapp\", \"type\": \"rds\"},\n {\"key\": \"应用模块\", \"value\": \"cmdb.appinfo\", \"type\": \"rds\"}\n ]\n}" }, { "identifier": "DASHBOARD_TIME_FORMAT_T", "path": "common/variables.py", "snippet": "DASHBOARD_TIME_FORMAT_T = {'years': '%Y', 'months': '%Y-%m', 'days': '%Y-%m-%d', 'hours': \"%Y-%m-%d %H:00:00\",\n 'minutes': \"%Y-%m-%d %H:%M:00\", 'seconds': \"%Y-%m-%d %H:%M:%S\"}" }, { "identifier": "DASHBOARD_TIME_FORMAT_T_ES", "path": "common/variables.py", "snippet": "DASHBOARD_TIME_FORMAT_T_ES = {'years': 'yyyy', 'months': 'yyyy-MM', 'days': 'yyyy-MM-dd',\n 'hours': \"yyyy-MM-dd HH:00:00\", 'minutes': \"yyyy-MM-dd HH:mm:00\",\n 'seconds': \"yyyy-MM-dd HH:mm:ss\"}" }, { "identifier": "DashBoard", "path": "dbapp/model/model_dashboard.py", "snippet": "class DashBoard(TimeAbstract):\n name = models.CharField(max_length=128, unique=True, verbose_name='名称')\n config = models.JSONField(default=list, verbose_name='配置')\n type = models.CharField(max_length=16, choices=DASHBOARD_TYPE, default='index',\n verbose_name='报表类型', help_text=f\"报表类型: {dict(DASHBOARD_TYPE)}\")\n creator = models.ForeignKey(\n UserProfile, on_delete=models.CASCADE, verbose_name='创建人')\n\n def __str__(self):\n return self.name\n\n class Meta:\n db_table = 'dashboard_dashboard'\n default_permissions = ()\n verbose_name = '报表配置'\n verbose_name_plural = verbose_name + '管理'" }, { "identifier": "BuildJob", "path": "dbapp/model/model_deploy.py", "snippet": "class BuildJob(TimeAbstract):\n \"\"\"\n 持续构建模型\n \"\"\"\n order_id = models.IntegerField(default=0, verbose_name='发布工单ID')\n appid = models.CharField(max_length=250, default='0',\n verbose_name='应用ID', help_text='应用唯一标识,无需填写')\n appinfo_id = models.IntegerField(\n default=0, db_index=True, verbose_name='应用模块ID')\n deployer = models.ForeignKey(UserProfile, verbose_name='发布人', blank=True, related_name='deployer', null=True,\n default=None, on_delete=models.SET_NULL)\n # {0: 未构建, 1: 构建成功, 2: 构建失败, 3: 构建中, 4: 作废}\n status = models.SmallIntegerField(default=3, choices=G_CI_STATUS, verbose_name=\"状态\",\n help_text=f\"状态值: {dict(G_CI_STATUS)}\")\n queue_number = models.IntegerField(default=0, verbose_name='队列ID')\n build_number = models.IntegerField(default=0, verbose_name='构建ID')\n commits = models.JSONField(default=dict, verbose_name='提交信息')\n commit_tag = models.JSONField(default=dict, verbose_name='提交类型',\n help_text='label可选: heads|tags\\nname: 具体的分支或者标签\\n{\"label\": \"heads\", \"name\": \"master\"}')\n # {0: 构建, 1: 构建发布}\n is_deploy = models.SmallIntegerField(default=0, verbose_name='构建发布',\n help_text='是否构建完后进行发布, {0: 不发布, 1: 发布}')\n jenkins_flow = models.TextField(\n verbose_name='jenkins pipeline', blank=True, 
null=True, default=\"\")\n image = models.CharField(max_length=250, blank=True,\n null=True, verbose_name='容器镜像')\n sync_status = models.SmallIntegerField(default=0, choices=G_IMAGE_SYNC_STAT, verbose_name='镜像同步状态',\n help_text=f\"{dict(G_IMAGE_SYNC_STAT)}, 默认0\")\n modules = models.CharField(\n max_length=250, blank=True, null=True, verbose_name='工程模块')\n batch_uuid = models.CharField(\n max_length=40, null=True, blank=True, verbose_name='批量部署标识')\n\n @property\n def job_name(self):\n try:\n appinfo_obj = AppInfo.objects.get(id=self.appinfo_id)\n job_name = f'{appinfo_obj.environment.name}-{appinfo_obj.app.category.split(\".\")[-1]}-{appinfo_obj.app.project.name}-{appinfo_obj.app.name.split(\".\")[-1]}'.lower(\n )\n except AppInfo.DoesNotExist:\n job_name = ''\n return job_name\n\n def __str__(self):\n return '%s-%s-%s' % (self.order_id, self.appinfo_id, self.image)\n\n class Meta:\n db_table = 'deploy_buildjob'\n default_permissions = ()\n ordering = ['-id']" } ]
import random import operator import logging from functools import reduce from jira import Project from rest_framework.views import APIView from rest_framework.response import Response from rest_framework.decorators import action from django.db.models import Q, Count, Sum, Avg from django.db.models.query import QuerySet from django.apps import apps from django.db.models.functions import ExtractWeek, ExtractYear, ExtractDay, ExtractMonth from elasticsearch_dsl import Q as EQ from common.utils.ElasticSearchAPI import Search from dbapp.model.model_cmdb import Product, MicroApp from common.ext_fun import get_datadict, get_time_range from common.extends.viewsets import CustomModelViewSet from common.variables import CMDB_RELATED_TYPE, DASHBOARD_CONFIG, DASHBOARD_TIME_FORMAT_T, DASHBOARD_TIME_FORMAT_T_ES from dbapp.model.model_dashboard import DashBoard from dashboard.serializers import DashBoardSerializers from dbapp.model.model_deploy import BuildJob
4,831
#!/usr/bin/env python # -*- encoding: utf-8 -*- ''' @Author : Charles Lai @Contact : [email protected] @Time : 2021/12/27 17:47 @FileName: views.py @Blog : https://imaojia.com ''' logger = logging.getLogger(__name__) class LiveCheck(APIView): """ 探针检测 """ permission_classes = [] def get(self, request, format=None): return Response('PONG')
#!/usr/bin/env python # -*- encoding: utf-8 -*- ''' @Author : Charles Lai @Contact : [email protected] @Time : 2021/12/27 17:47 @FileName: views.py @Blog : https://imaojia.com ''' logger = logging.getLogger(__name__) class LiveCheck(APIView): """ 探针检测 """ permission_classes = [] def get(self, request, format=None): return Response('PONG')
class DashBoardViewSet(CustomModelViewSet):
5
2023-12-13 03:09:32+00:00
8k
abing7k/redroid-script
redroid.py
[ { "identifier": "Gapps", "path": "stuffs/gapps.py", "snippet": "class Gapps(General):\n dl_links = {\n \"x86_64\": [\"https://cfhcable.dl.sourceforge.net/project/opengapps/x86_64/20220503/open_gapps-x86_64-10.0-pico-20220503.zip\", \"5fb186bfb7bed8925290f79247bec4cf\"],\n \"x86\": [\"https://cfhcable.dl.sourceforge.net/project/opengapps/x86/20220503/open_gapps-x86-10.0-pico-20220503.zip\", \"7fc75ec9bdca8def07bad306345ce877\"],\n \"arm64-v8a\": [\"https://cfhcable.dl.sourceforge.net/project/opengapps/arm64/20220503/open_gapps-arm64-10.0-pico-20220503.zip\", \"2feaf25d03530892c6146687ffa08bc2\"],\n \"armeabi-v7a\": [\"https://cfhcable.dl.sourceforge.net/project/opengapps/arm/20220215/open_gapps-arm-10.0-pico-20220215.zip\", \"1d00ffa4594734d477b10f2e0ee19c0b\"]\n }\n arch = host()\n print(\"arch: \"+str(arch))\n download_loc = get_download_dir()\n dl_link = dl_links[arch[0]][0]\n dl_file_name = os.path.join(download_loc, \"open_gapps.zip\")\n act_md5 = dl_links[arch[0]][1]\n copy_dir = \"./gapps\"\n extract_to = \"/tmp/ogapps/extract\"\n non_apks = [\n \"vending-common.tar.lz\",\n \"defaultetc-common.tar.lz\",\n \"defaultframework-common.tar.lz\",\n \"googlepixelconfig-common.tar.lz\"\n ]\n\n if arch == ('arm64-v8a', 64):\n skip_1 = 'setupwizarddefault-x86_64.tar.lz'\n skip_2 = \"setupwizardtablet-x86_64.tar.lz\"\n \n if arch == ('x86_64', 64):\n skip_1 = 'setupwizarddefault-arm64.tar.lz'\n skip_2 = \"setupwizardtablet-arm64.tar.lz\"\n skip = [\n skip_1,\n skip_2\n ]\n\n def download(self):\n print_color(\"Downloading OpenGapps now .....\", bcolors.GREEN)\n super().download()\n\n def copy(self):\n if os.path.exists(self.copy_dir):\n shutil.rmtree(self.copy_dir)\n if not os.path.exists(self.extract_to):\n os.makedirs(self.extract_to)\n if not os.path.exists(os.path.join(self.extract_to, \"appunpack\")):\n os.makedirs(os.path.join(self.extract_to, \"appunpack\"))\n\n for lz_file in os.listdir(os.path.join(self.extract_to, \"Core\")):\n for d in os.listdir(os.path.join(self.extract_to, \"appunpack\")):\n shutil.rmtree(os.path.join(self.extract_to, \"appunpack\", d))\n if lz_file not in self.skip:\n if lz_file not in self.non_apks:\n print(\" Processing app package : \"+os.path.join(self.extract_to, \"Core\", lz_file))\n run([\"tar\", \"--lzip\", \"-xvf\", os.path.join(self.extract_to, \"Core\", lz_file), \"-C\", os.path.join(self.extract_to, \"appunpack\")])\n app_name = os.listdir(os.path.join(self.extract_to, \"appunpack\"))[0]\n xx_dpi = os.listdir(os.path.join(self.extract_to, \"appunpack\", app_name))[0]\n app_priv = os.listdir(os.path.join(self.extract_to, \"appunpack\", app_name, \"nodpi\"))[0]\n app_src_dir = os.path.join(self.extract_to, \"appunpack\", app_name, xx_dpi, app_priv)\n for app in os.listdir(app_src_dir):\n shutil.copytree(os.path.join(app_src_dir, app), os.path.join(self.copy_dir, \"system\", \"priv-app\", app), dirs_exist_ok=True)\n else:\n print(\" Processing extra package : \"+os.path.join(self.extract_to, \"Core\", lz_file))\n run([\"tar\", \"--lzip\", \"-xvf\", os.path.join(self.extract_to, \"Core\", lz_file), \"-C\", os.path.join(self.extract_to, \"appunpack\")])\n app_name = os.listdir(os.path.join(self.extract_to, \"appunpack\"))[0]\n common_content_dirs = os.listdir(os.path.join(self.extract_to, \"appunpack\", app_name, \"common\"))\n for ccdir in common_content_dirs:\n shutil.copytree(os.path.join(self.extract_to, \"appunpack\", app_name, \"common\", ccdir), os.path.join(self.copy_dir, \"system\", ccdir), dirs_exist_ok=True)" }, { "identifier": "Magisk", 
"path": "stuffs/magisk.py", "snippet": "class Magisk(General):\n download_loc = get_download_dir()\n dl_link = \"https://mgb1.androidfilehost.com/dl/_E1ugpo3KLudP2K-WauRfQ/1702724403/10620683726822077179/Magisk+Delta+25206+canary+%284dbd8358%29.apk\"\n dl_file_name = os.path.join(download_loc, \"magisk.apk\")\n extract_to = \"/tmp/magisk_unpack\"\n copy_dir = \"./magisk\"\n magisk_dir = os.path.join(copy_dir, \"system\", \"etc\", \"init\", \"magisk\")\n machine = host()\n oringinal_bootanim = \"\"\"\nservice bootanim /system/bin/bootanimation\n class core animation\n user graphics\n group graphics audio\n disabled\n oneshot\n ioprio rt 0\n task_profiles MaxPerformance\n \n\"\"\"\n bootanim_component = \"\"\"\non post-fs-data\n start logd\n exec u:r:su:s0 root root -- /system/etc/init/magisk/magisk{arch} --auto-selinux --setup-sbin /system/etc/init/magisk\n exec u:r:su:s0 root root -- /system/etc/init/magisk/magiskpolicy --live --magisk \"allow * magisk_file lnk_file *\"\n mkdir /sbin/.magisk 700\n mkdir /sbin/.magisk/mirror 700\n mkdir /sbin/.magisk/block 700\n copy /system/etc/init/magisk/config /sbin/.magisk/config\n rm /dev/.magisk_unblock\n exec u:r:su:s0 root root -- /sbin/magisk --auto-selinux --post-fs-data\n wait /dev/.magisk_unblock 40\n rm /dev/.magisk_unblock\n\non zygote-start\n exec u:r:su:s0 root root -- /sbin/magisk --auto-selinux --service\n\non property:sys.boot_completed=1\n mkdir /data/adb/magisk 755\n exec u:r:su:s0 root root -- /sbin/magisk --auto-selinux --boot-complete\n exec -- /system/bin/sh -c \"if [ ! -e /data/data/io.github.huskydg.magisk ] ; then pm install /system/etc/init/magisk/magisk.apk ; fi\"\n \non property:init.svc.zygote=restarting\n exec u:r:su:s0 root root -- /sbin/magisk --auto-selinux --zygote-restart\n \non property:init.svc.zygote=stopped\n exec u:r:su:s0 root root -- /sbin/magisk --auto-selinux --zygote-restart\n \"\"\".format(arch=machine[1])\n\n def download(self):\n if os.path.isfile(self.dl_file_name):\n os.remove(self.dl_file_name)\n print_color(\"Downloading latest Magisk-Delta now .....\", bcolors.GREEN)\n download_file(self.dl_link, self.dl_file_name) \n\n def copy(self):\n if os.path.exists(self.copy_dir):\n shutil.rmtree(self.copy_dir)\n if not os.path.exists(self.magisk_dir):\n os.makedirs(self.magisk_dir, exist_ok=True)\n\n if not os.path.exists(os.path.join(self.copy_dir, \"sbin\")):\n os.makedirs(os.path.join(self.copy_dir, \"sbin\"), exist_ok=True)\n\n print_color(\"Copying magisk libs now ...\", bcolors.GREEN)\n \n lib_dir = os.path.join(self.extract_to, \"lib\", self.machine[0])\n for parent, dirnames, filenames in os.walk(lib_dir):\n for filename in filenames:\n o_path = os.path.join(lib_dir, filename) \n filename = re.search('lib(.*)\\.so', filename)\n n_path = os.path.join(self.magisk_dir, filename.group(1))\n shutil.copyfile(o_path, n_path)\n run([\"chmod\", \"+x\", n_path])\n shutil.copyfile(self.dl_file_name, os.path.join(self.magisk_dir,\"magisk.apk\") )\n\n # Updating Magisk from Magisk manager will modify bootanim.rc, \n # So it is necessary to backup the original bootanim.rc.\n bootanim_path = os.path.join(self.copy_dir, \"system\", \"etc\", \"init\", \"bootanim.rc\")\n gz_filename = os.path.join(bootanim_path)+\".gz\"\n with gzip.open(gz_filename,'wb') as f_gz:\n f_gz.write(self.oringinal_bootanim.encode('utf-8'))\n with open(bootanim_path, \"w\") as initfile:\n initfile.write(self.oringinal_bootanim+self.bootanim_component)\n\n os.chmod(bootanim_path, 0o644)" }, { "identifier": "Ndk", "path": "stuffs/ndk.py", 
"snippet": "class Ndk(General):\n download_loc = get_download_dir()\n copy_dir = \"./ndk\"\n dl_link = \"https://github.com/supremegamers/vendor_google_proprietary_ndk_translation-prebuilt/archive/181d9290a69309511185c4417ba3d890b3caaaa8.zip\"\n dl_file_name = os.path.join(download_loc, \"libndktranslation.zip\")\n extract_to = \"/tmp/libndkunpack\"\n act_md5 = \"0beff55f312492f24d539569d84f5bfb\"\n# init_rc_component = \"\"\"\n# # Enable native bridge for target executables\n# on early-init\n# mount binfmt_misc binfmt_misc /proc/sys/fs/binfmt_misc\n\n# on property:ro.enable.native.bridge.exec=1\n# copy /system/etc/binfmt_misc/arm_exe /proc/sys/fs/binfmt_misc/register\n# copy /system/etc/binfmt_misc/arm_dyn /proc/sys/fs/binfmt_misc/register\n# copy /system/etc/binfmt_misc/arm64_exe /proc/sys/fs/binfmt_misc/register\n# copy /system/etc/binfmt_misc/arm64_dyn /proc/sys/fs/binfmt_misc/register\n# \"\"\"\n \n def download(self):\n print_color(\"Downloading libndk now .....\", bcolors.GREEN)\n super().download()\n\n def copy(self):\n if os.path.exists(self.copy_dir):\n shutil.rmtree(self.copy_dir)\n run([\"chmod\", \"+x\", self.extract_to, \"-R\"])\n \n print_color(\"Copying libndk library files ...\", bcolors.GREEN)\n shutil.copytree(os.path.join(self.extract_to, \"vendor_google_proprietary_ndk_translation-prebuilt-181d9290a69309511185c4417ba3d890b3caaaa8\", \"prebuilts\"), os.path.join(self.copy_dir, \"system\"), dirs_exist_ok=True)\n\n init_path = os.path.join(self.copy_dir, \"system\", \"etc\", \"init\", \"ndk_translation.rc\")\n os.chmod(init_path, 0o644)\n # if not os.path.isfile(init_path):\n # os.makedirs(os.path.dirname(init_path), exist_ok=True)\n # with open(init_path, \"w\") as initfile:\n # initfile.write(self.init_rc_component)" }, { "identifier": "Widevine", "path": "stuffs/widevine.py", "snippet": "class Widevine(General):\n def __init__(self, android_version) -> None:\n super().__init__()\n self.android_version = android_version\n self.dl_link = self.dl_links[self.machine[0]][android_version][0]\n self.act_md5 = self.dl_links[self.machine[0]][android_version][1]\n\n download_loc = get_download_dir()\n machine = host()\n copy_dir = \"./widevine\"\n dl_links = {\n # \"x86\": {\n # \"11.0.0\": [\"https://github.com/supremegamers/vendor_google_proprietary_widevine-prebuilt/archive/48d1076a570837be6cdce8252d5d143363e37cc1.zip\",\n # \"f587b8859f9071da4bca6cea1b9bed6a\"]\n # },\n \"x86_64\": {\n \"11.0.0\": [\"https://github.com/supremegamers/vendor_google_proprietary_widevine-prebuilt/archive/48d1076a570837be6cdce8252d5d143363e37cc1.zip\",\n \"f587b8859f9071da4bca6cea1b9bed6a\"],\n \"12.0.0\": [\"https://github.com/supremegamers/vendor_google_proprietary_widevine-prebuilt/archive/3bba8b6e9dd5ffad5b861310433f7e397e9366e8.zip\",\n \"3e147bdeeb7691db4513d93cfa6beb23\"],\n \"13.0.0\": [\"https://github.com/supremegamers/vendor_google_proprietary_widevine-prebuilt/archive/a8524d608431573ef1c9313822d271f78728f9a6.zip\",\n \"5c55df61da5c012b4e43746547ab730f\"]\n },\n # \"armeabi-v7a\":\n # {\n # \"11.0.0\": [\"https://github.com/supremegamers/vendor_google_proprietary_widevine-prebuilt/archive/7b6e37ef0b63408f7d0232e67192020ba0aa402b.zip\",\n # \"3c3a136dc926ae5fc07826359720dbab\"]\n # },\n \"arm64-v8a\": {\n \"11.0.0\": [\"https://github.com/supremegamers/vendor_google_proprietary_widevine-prebuilt/archive/a1a19361d36311bee042da8cf4ced798d2c76d98.zip\",\n \"fed6898b5cfd2a908cb134df97802554\"]\n }\n }\n dl_file_name = os.path.join(download_loc, \"widevine.zip\")\n extract_to = 
\"/tmp/widevineunpack\"\n\n def download(self):\n print_color(\"Downloading widevine now .....\", bcolors.GREEN)\n super().download()\n\n def copy(self):\n if os.path.exists(self.copy_dir):\n shutil.rmtree(self.copy_dir)\n run([\"chmod\", \"+x\", self.extract_to, \"-R\"])\n print_color(\"Copying widevine library files ...\", bcolors.GREEN)\n name = re.findall(\"([a-zA-Z0-9]+)\\.zip\", self.dl_link)[0]\n shutil.copytree(os.path.join(self.extract_to, \"vendor_google_proprietary_widevine-prebuilt-\"+name,\n \"prebuilts\"), os.path.join(self.copy_dir, \"vendor\"), dirs_exist_ok=True)\n\n if \"x86\" in self.machine[0] and self.android_version == \"11.0.0\":\n os.symlink(\"./libprotobuf-cpp-lite-3.9.1.so\",\n os.path.join(self.copy_dir, \"vendor\", \"lib\", \"libprotobuf-cpp-lite.so\"))\n os.symlink(\"./libprotobuf-cpp-lite-3.9.1.so\", os.path.join(self.copy_dir,\n \"vendor\", \"lib64\", \"libprotobuf-cpp-lite.so\"))\n\n for file in os.listdir(os.path.join(self.copy_dir, \"vendor\", \"etc\", \"init\")):\n if file.endswith('.rc'):\n os.chmod(os.path.join(self.copy_dir, \"vendor\", \"etc\", \"init\", file), 0o644)" } ]
import argparse import tools.helper as helper import subprocess from stuffs.gapps import Gapps from stuffs.magisk import Magisk from stuffs.ndk import Ndk from stuffs.widevine import Widevine
4,324
#!/usr/bin/env python3 def main(): dockerfile = "" tags = [] parser = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument('-a', '--android-version', dest='android', help='Specify the Android version to build', default='11.0.0', choices=['13.0.0', '12.0.0', '12.0.0_64only', '11.0.0', '10.0.0', '9.0.0', '8.1.0']) parser.add_argument('-g', '--install-gapps', dest='gapps', help='Install OpenGapps to ReDroid', action='store_true') parser.add_argument('-n', '--install-ndk-translation', dest='ndk', help='Install libndk translation files', action='store_true') parser.add_argument('-m', '--install-magisk', dest='magisk', help='Install Magisk ( Bootless )', action='store_true') parser.add_argument('-w', '--install-widevine', dest='widevine', help='Integrate Widevine DRM (L3)', action='store_true') parser.add_argument('-c', '--container', dest='container', default='docker', help='Specify container type', choices=['docker', 'podman']) args = parser.parse_args() dockerfile = dockerfile + \ "FROM redroid/redroid:{}-latest\n".format( args.android) tags.append(args.android) if args.gapps: Gapps().install() dockerfile = dockerfile + "COPY gapps /\n" tags.append("gapps") if args.ndk: if args.android in ["11.0.0", "12.0.0", "12.0.0_64only"]: arch = helper.host()[0] if arch == "x86" or arch == "x86_64":
#!/usr/bin/env python3
def main():
    dockerfile = ""
    tags = []
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('-a', '--android-version',
                        dest='android',
                        help='Specify the Android version to build',
                        default='11.0.0',
                        choices=['13.0.0', '12.0.0', '12.0.0_64only', '11.0.0', '10.0.0', '9.0.0', '8.1.0'])
    parser.add_argument('-g', '--install-gapps',
                        dest='gapps',
                        help='Install OpenGapps to ReDroid',
                        action='store_true')
    parser.add_argument('-n', '--install-ndk-translation',
                        dest='ndk',
                        help='Install libndk translation files',
                        action='store_true')
    parser.add_argument('-m', '--install-magisk',
                        dest='magisk',
                        help='Install Magisk ( Bootless )',
                        action='store_true')
    parser.add_argument('-w', '--install-widevine',
                        dest='widevine',
                        help='Integrate Widevine DRM (L3)',
                        action='store_true')
    parser.add_argument('-c', '--container',
                        dest='container',
                        default='docker',
                        help='Specify container type',
                        choices=['docker', 'podman'])
    args = parser.parse_args()

    dockerfile = dockerfile + \
        "FROM redroid/redroid:{}-latest\n".format(args.android)
    tags.append(args.android)
    if args.gapps:
        Gapps().install()
        dockerfile = dockerfile + "COPY gapps /\n"
        tags.append("gapps")
    if args.ndk:
        if args.android in ["11.0.0", "12.0.0", "12.0.0_64only"]:
            arch = helper.host()[0]
            if arch == "x86" or arch == "x86_64":
Ndk().install()
2
2023-12-06 09:03:05+00:00
8k
zvict/papr
models/model.py
[ { "identifier": "normalize_vector", "path": "models/utils.py", "snippet": "def normalize_vector(x, eps=0.):\n # assert(x.shape[-1] == 3)\n return x / (torch.norm(x, dim=-1, keepdim=True) + eps)" }, { "identifier": "create_learning_rate_fn", "path": "models/utils.py", "snippet": "def create_learning_rate_fn(optimizer, max_steps, args, debug=False):\n \"\"\"Create learning rate schedule.\"\"\"\n if args.type == \"none\":\n return None\n\n if args.warmup > 0:\n warmup_start_factor = 1e-16\n else:\n warmup_start_factor = 1.0\n\n warmup_fn = lr_scheduler.LinearLR(optimizer,\n start_factor=warmup_start_factor,\n end_factor=1.0,\n total_iters=args.warmup,\n verbose=debug)\n\n if args.type == \"linear\":\n decay_fn = lr_scheduler.LinearLR(optimizer,\n start_factor=1.0,\n end_factor=0.,\n total_iters=max_steps - args.warmup,\n verbose=debug)\n schedulers = [warmup_fn, decay_fn]\n milestones = [args.warmup]\n\n elif args.type == \"cosine\":\n cosine_steps = max(max_steps - args.warmup, 1)\n decay_fn = lr_scheduler.CosineAnnealingLR(optimizer,\n T_max=cosine_steps,\n verbose=debug)\n schedulers = [warmup_fn, decay_fn]\n milestones = [args.warmup]\n\n elif args.type == \"cosine-hlfperiod\":\n cosine_steps = max(max_steps - args.warmup, 1) * 2\n decay_fn = lr_scheduler.CosineAnnealingLR(optimizer,\n T_max=cosine_steps,\n verbose=debug)\n schedulers = [warmup_fn, decay_fn]\n milestones = [args.warmup]\n\n elif args.type == \"exp\":\n decay_fn = lr_scheduler.ExponentialLR(optimizer,\n gamma=args.gamma,\n verbose=debug)\n schedulers = [warmup_fn, decay_fn]\n milestones = [args.warmup]\n\n elif args.type == \"stop\":\n decay_fn = lr_scheduler.StepLR(\n optimizer, step_size=1, gamma=0.0, verbose=debug)\n schedulers = [warmup_fn, decay_fn]\n milestones = [args.warmup]\n\n else:\n raise NotImplementedError\n\n schedule_fn = lr_scheduler.SequentialLR(optimizer,\n schedulers=schedulers,\n milestones=milestones,\n verbose=debug)\n\n return schedule_fn" }, { "identifier": "add_points_knn", "path": "models/utils.py", "snippet": "def add_points_knn(coords, influ_scores, add_num, k, comb_type=\"mean\", sample_type=\"random\", sample_k=10, point_features=None):\n \"\"\"\n Add points to the point cloud by kNN\n \"\"\"\n pc = KDTree(coords)\n N = coords.shape[0]\n\n # Step 1: Determine where to add points\n if N <= add_num and \"random\" in comb_type:\n inds = np.random.choice(N, add_num, replace=True)\n query_coords = coords[inds, :]\n elif N <= add_num:\n query_coords = coords\n inds = list(range(N))\n else:\n if sample_type == \"random\":\n inds = np.random.choice(N, add_num, replace=False)\n query_coords = coords[inds, :]\n elif sample_type == \"top-knn-std\":\n assert k >= 2\n nns_dists, nns_inds = pc.query(coords, k=sample_k)\n inds = np.argsort(nns_dists.std(axis=-1))[-add_num:]\n query_coords = coords[inds, :]\n elif sample_type == \"top-knn-mean\":\n assert k >= 2\n nns_dists, nns_inds = pc.query(coords, k=sample_k)\n inds = np.argsort(nns_dists.mean(axis=-1))[-add_num:]\n query_coords = coords[inds, :]\n elif sample_type == \"top-knn-max\":\n assert k >= 2\n nns_dists, nns_inds = pc.query(coords, k=sample_k)\n inds = np.argsort(nns_dists.max(axis=-1))[-add_num:]\n query_coords = coords[inds, :]\n elif sample_type == \"top-knn-min\":\n assert k >= 2\n nns_dists, nns_inds = pc.query(coords, k=sample_k)\n inds = np.argsort(nns_dists.min(axis=-1))[-add_num:]\n query_coords = coords[inds, :]\n elif sample_type == \"influ-scores-max\":\n inds = np.argsort(influ_scores.squeeze())[-add_num:]\n query_coords = 
coords[inds, :]\n elif sample_type == \"influ-scores-min\":\n inds = np.argsort(influ_scores.squeeze())[:add_num]\n query_coords = coords[inds, :]\n else:\n raise NotImplementedError\n\n # Step 2: Add points by kNN\n new_features = None\n if comb_type == \"duplicate\":\n noise = np.random.randn(3).astype(np.float32)\n noise = noise / np.linalg.norm(noise)\n noise *= k\n new_coords = (query_coords + noise)\n new_influ_scores = influ_scores[inds, :]\n if point_features is not None:\n new_features = point_features[inds, :]\n else:\n nns_dists, nns_inds = pc.query(query_coords, k=k+1)\n nns_dists = nns_dists.astype(np.float32)\n nns_dists = nns_dists[:, 1:]\n nns_inds = nns_inds[:, 1:]\n if comb_type == \"mean\":\n new_coords = coords[nns_inds, :].mean(\n axis=-2) # (Nq, k, 3) -> (Nq, 3)\n new_influ_scores = influ_scores[nns_inds, :].mean(axis=-2)\n if point_features is not None:\n new_features = point_features[nns_inds, :].mean(axis=-2)\n elif comb_type == \"random\":\n rnd_w = np.random.uniform(\n 0, 1, (query_coords.shape[0], k)).astype(np.float32)\n rnd_w /= rnd_w.sum(axis=-1, keepdims=True)\n new_coords = (coords[nns_inds, :] *\n rnd_w.reshape(-1, k, 1)).sum(axis=-2)\n new_influ_scores = (\n influ_scores[nns_inds, :] * rnd_w.reshape(-1, k, 1)).sum(axis=-2)\n if point_features is not None:\n new_features = (\n point_features[nns_inds, :] * rnd_w.reshape(-1, k, 1)).sum(axis=-2)\n elif comb_type == \"random-softmax\":\n rnd_w = np.random.randn(\n query_coords.shape[0], k).astype(np.float32)\n rnd_w = scipy.special.softmax(rnd_w, axis=-1)\n new_coords = (coords[nns_inds, :] *\n rnd_w.reshape(-1, k, 1)).sum(axis=-2)\n new_influ_scores = (\n influ_scores[nns_inds, :] * rnd_w.reshape(-1, k, 1)).sum(axis=-2)\n if point_features is not None:\n new_features = (\n point_features[nns_inds, :] * rnd_w.reshape(-1, k, 1)).sum(axis=-2)\n elif comb_type == \"weighted\":\n new_coords = (coords[nns_inds, :] * (1 / (nns_dists + 1e-6)).reshape(-1, k, 1)).sum(\n axis=-2) / (1 / (nns_dists + 1e-6)).sum(axis=-1, keepdims=True)\n new_influ_scores = (influ_scores[nns_inds, :] * (1 / (nns_dists + 1e-6)).reshape(-1, k, 1)).sum(\n axis=-2) / (1 / (nns_dists + 1e-6)).sum(axis=-1, keepdims=True)\n if point_features is not None:\n new_features = (point_features[nns_inds, :] * (1 / (nns_dists + 1e-6)).reshape(-1, k, 1)).sum(\n axis=-2) / (1 / (nns_dists + 1e-6)).sum(axis=-1, keepdims=True)\n else:\n raise NotImplementedError\n return new_coords, len(new_coords), new_influ_scores, new_features" }, { "identifier": "activation_func", "path": "models/utils.py", "snippet": "def activation_func(act_type='leakyrelu', neg_slope=0.2, inplace=True, num_channels=128, a=1., b=1., trainable=False):\n act_type = act_type.lower()\n if act_type == 'none':\n layer = nn.Identity()\n elif act_type == 'leakyrelu':\n layer = nn.LeakyReLU(neg_slope, inplace)\n elif act_type == 'prelu':\n layer = nn.PReLU(num_channels)\n elif act_type == 'relu':\n layer = nn.ReLU(inplace)\n elif act_type == '+1':\n layer = PlusOneActivation()\n elif act_type == 'relu+1':\n layer = nn.Sequential(nn.ReLU(inplace), PlusOneActivation())\n elif act_type == 'tanh':\n layer = nn.Tanh()\n elif act_type == 'shifted_tanh':\n layer = ShiftedTanh()\n elif act_type == 'sigmoid':\n layer = nn.Sigmoid()\n elif act_type == 'gelu':\n layer = nn.GELU()\n elif act_type == 'gaussian':\n layer = GaussianActivation(a, trainable)\n elif act_type == 'quadratic':\n layer = QuadraticActivation(a, trainable)\n elif act_type == 'multi-quadratic':\n layer = MultiQuadraticActivation(a, 
trainable)\n elif act_type == 'laplacian':\n layer = LaplacianActivation(a, trainable)\n elif act_type == 'super-gaussian':\n layer = SuperGaussianActivation(a, b, trainable)\n elif act_type == 'expsin':\n layer = ExpSinActivation(a, trainable)\n elif act_type == 'clamp':\n layer = Clamp(0, 1)\n elif 'sine' in act_type:\n layer = Sine(factor=a)\n elif 'softplus' in act_type:\n a, b, c = [float(i) for i in act_type.split('_')[1:]]\n print(\n 'Softplus activation: a={:.2f}, b={:.2f}, c={:.2f}'.format(a, b, c))\n layer = SoftplusActivation(a, b, c)\n else:\n raise NotImplementedError(\n 'activation layer [{:s}] is not found'.format(act_type))\n return layer" }, { "identifier": "get_mapping_mlp", "path": "models/mlp.py", "snippet": "def get_mapping_mlp(args, use_amp=False, amp_dtype=torch.float16):\n return MappingMLP(args.mapping_mlp, inp_dim=args.shading_code_dim, out_dim=args.mapping_mlp.out_dim, use_amp=use_amp, amp_dtype=amp_dtype)" }, { "identifier": "get_transformer", "path": "models/tx.py", "snippet": "def get_transformer(args, seq_len, v_extra_dim=0, k_extra_dim=0, q_extra_dim=0, eps=1e-6, use_amp=False, amp_dtype=torch.float16):\n k_dim_map = {\n 1: [3, 3, 3],\n }\n k_dim = k_dim_map[args.k_type]\n\n q_dim_map = {\n 1: [3],\n }\n q_dim = q_dim_map[args.q_type]\n\n v_dim_map = {\n 1: [3, 3],\n }\n v_dim = v_dim_map[args.v_type]\n\n return Transformer(d_k=k_dim, d_q=q_dim, d_v=v_dim, d_model=args.d_model, d_out=args.d_out, seq_len=seq_len,\n embed_args=args.embed, block_args=args.block, d_ko=k_extra_dim, d_qo=q_extra_dim,\n d_vo=v_extra_dim, eps=eps, use_amp=use_amp, amp_dtype=amp_dtype)" }, { "identifier": "get_generator", "path": "models/renderer.py", "snippet": "def get_generator(args, in_c, out_c, use_amp=False, amp_dtype=torch.float16):\n if args.type == \"small-unet\":\n opt = args.small_unet\n return SmallUNet(in_c, out_c, bilinear=opt.bilinear, single=opt.single, norm=opt.norm, last_act=opt.last_act,\n use_amp=use_amp, amp_dtype=amp_dtype, affine_layer=opt.affine_layer)\n elif args.type == \"mlp\":\n opt = args.mlp\n return MLPGenerator(inp_dim=in_c, num_layers=opt.num_layers, num_channels=opt.num_channels, out_dim=out_c,\n act_type=opt.act_type, last_act_type=opt.last_act_type, use_wn=opt.use_wn, a=opt.act_a, b=opt.act_b,\n trainable=opt.act_trainable, skip_layers=opt.skip_layers, bias=opt.bias, half_layers=opt.half_layers,\n residual_layers=opt.residual_layers, residual_dims=opt.residual_dims)\n else:\n raise NotImplementedError(\n 'generator type [{:d}] is not supported'.format(args.type))" } ]
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import os
import numpy as np
from .utils import normalize_vector, create_learning_rate_fn, add_points_knn, activation_func
from .mlp import get_mapping_mlp
from .tx import get_transformer
from .renderer import get_generator
4,447
def count_parameters(model): return sum(p.numel() for p in model.parameters() if p.requires_grad) class PAPR(nn.Module): def __init__(self, args, device='cuda'): super(PAPR, self).__init__() self.args = args self.eps = args.eps self.device = device self.use_amp = args.use_amp self.amp_dtype = torch.float16 if args.amp_dtype == 'float16' else torch.bfloat16 self.scaler = torch.cuda.amp.GradScaler(enabled=self.use_amp) point_opt = args.geoms.points pc_feat_opt = args.geoms.point_feats bkg_feat_opt = args.geoms.background self.register_buffer('select_k', torch.tensor( point_opt.select_k, device=device, dtype=torch.int32)) self.coord_scale = args.dataset.coord_scale if point_opt.load_path: if point_opt.load_path.endswith('.pth') or point_opt.load_path.endswith('.pt'): points = torch.load(point_opt.load_path, map_location='cpu') points = np.asarray(points).astype(np.float32) np.random.shuffle(points) points = points[:args.max_num_pts, :] points = torch.from_numpy(points).float() print("Loaded points from {}, shape: {}, dtype {}".format(point_opt.load_path, points.shape, points.dtype)) print("Loaded points scale: ", points[:, 0].min(), points[:, 0].max(), points[:, 1].min(), points[:, 1].max(), points[:, 2].min(), points[:, 2].max()) else: # Initialize point positions pt_init_center = [i * self.coord_scale for i in point_opt.init_center] pt_init_scale = [i * self.coord_scale for i in point_opt.init_scale] if point_opt.init_type == 'sphere': # initial points on a sphere points = self._sphere_pc(pt_init_center, point_opt.num, pt_init_scale) elif point_opt.init_type == 'cube': # initial points in a cube points = self._cube_normal_pc(pt_init_center, point_opt.num, pt_init_scale) else: raise NotImplementedError("Point init type [{:s}] is not found".format(point_opt.init_type)) print("Scratch points scale: ", points[:, 0].min(), points[:, 0].max(), points[:, 1].min(), points[:, 1].max(), points[:, 2].min(), points[:, 2].max()) self.points = torch.nn.Parameter(points, requires_grad=True) # Initialize point influence scores self.points_influ_scores = torch.nn.Parameter(torch.ones( points.shape[0], 1, device=device) * point_opt.influ_init_val, requires_grad=True) # Initialize mapping MLP, only if fine-tuning with IMLE for the exposure control self.mapping_mlp = None if args.models.mapping_mlp.use: self.mapping_mlp = get_mapping_mlp( args.models, use_amp=self.use_amp, amp_dtype=self.amp_dtype) # Initialize UNet if args.models.use_renderer: tx_opt = args.models.transformer feat_dim = tx_opt.embed.d_ff_out if tx_opt.embed.share_embed else tx_opt.embed.value.d_ff_out self.renderer = get_generator(args.models.renderer.generator, in_c=feat_dim, out_c=3, use_amp=self.use_amp, amp_dtype=self.amp_dtype) print("Renderer: ", count_parameters(self.renderer)) else: assert (args.models.transformer.embed.share_embed and args.models.transformer.embed.d_ff_out == 3) or \ (not args.models.transformer.embed.share_embed and args.models.transformer.embed.value.d_ff_out == 3), \ "Value embedding MLP should have output dim 3 if not using renderer" # Initialize background score and features if bkg_feat_opt.init_type == 'random': bkg_feat_init_func = torch.rand elif bkg_feat_opt.init_type == 'zeros': bkg_feat_init_func = torch.zeros elif bkg_feat_opt.init_type == 'ones': bkg_feat_init_func = torch.ones else: raise NotImplementedError( "Background init type [{:s}] is not found".format(bkg_feat_opt.init_type)) feat_dim = 3 self.bkg_feats = nn.Parameter(bkg_feat_init_func(bkg_feat_opt.seq_len, feat_dim, device=device) * 
bkg_feat_opt.init_scale, requires_grad=bkg_feat_opt.learnable) self.bkg_score = torch.tensor(bkg_feat_opt.constant, device=device, dtype=torch.float32).reshape(1) # Initialize point features self.use_pc_feats = pc_feat_opt.use_ink or pc_feat_opt.use_inq or pc_feat_opt.use_inv if self.use_pc_feats: self.pc_feats = nn.Parameter(torch.randn(points.shape[0], pc_feat_opt.dim), requires_grad=True) print("Point features: ", self.pc_feats.shape, self.pc_feats.min(), self.pc_feats.max(), self.pc_feats.mean(), self.pc_feats.std()) v_extra_dim = 0 k_extra_dim = 0 q_extra_dim = 0 if pc_feat_opt.use_inv: v_extra_dim = self.pc_feats.shape[-1] print("Using v_extra_dim: ", v_extra_dim) if pc_feat_opt.use_ink: k_extra_dim = self.pc_feats.shape[-1] print("Using k_extra_dim: ", k_extra_dim) if pc_feat_opt.use_inq: q_extra_dim = self.pc_feats.shape[-1] print("Using q_extra_dim: ", q_extra_dim) self.last_act = activation_func(args.models.last_act) # Initialize proximity attention layer(s)
def count_parameters(model): return sum(p.numel() for p in model.parameters() if p.requires_grad) class PAPR(nn.Module): def __init__(self, args, device='cuda'): super(PAPR, self).__init__() self.args = args self.eps = args.eps self.device = device self.use_amp = args.use_amp self.amp_dtype = torch.float16 if args.amp_dtype == 'float16' else torch.bfloat16 self.scaler = torch.cuda.amp.GradScaler(enabled=self.use_amp) point_opt = args.geoms.points pc_feat_opt = args.geoms.point_feats bkg_feat_opt = args.geoms.background self.register_buffer('select_k', torch.tensor( point_opt.select_k, device=device, dtype=torch.int32)) self.coord_scale = args.dataset.coord_scale if point_opt.load_path: if point_opt.load_path.endswith('.pth') or point_opt.load_path.endswith('.pt'): points = torch.load(point_opt.load_path, map_location='cpu') points = np.asarray(points).astype(np.float32) np.random.shuffle(points) points = points[:args.max_num_pts, :] points = torch.from_numpy(points).float() print("Loaded points from {}, shape: {}, dtype {}".format(point_opt.load_path, points.shape, points.dtype)) print("Loaded points scale: ", points[:, 0].min(), points[:, 0].max(), points[:, 1].min(), points[:, 1].max(), points[:, 2].min(), points[:, 2].max()) else: # Initialize point positions pt_init_center = [i * self.coord_scale for i in point_opt.init_center] pt_init_scale = [i * self.coord_scale for i in point_opt.init_scale] if point_opt.init_type == 'sphere': # initial points on a sphere points = self._sphere_pc(pt_init_center, point_opt.num, pt_init_scale) elif point_opt.init_type == 'cube': # initial points in a cube points = self._cube_normal_pc(pt_init_center, point_opt.num, pt_init_scale) else: raise NotImplementedError("Point init type [{:s}] is not found".format(point_opt.init_type)) print("Scratch points scale: ", points[:, 0].min(), points[:, 0].max(), points[:, 1].min(), points[:, 1].max(), points[:, 2].min(), points[:, 2].max()) self.points = torch.nn.Parameter(points, requires_grad=True) # Initialize point influence scores self.points_influ_scores = torch.nn.Parameter(torch.ones( points.shape[0], 1, device=device) * point_opt.influ_init_val, requires_grad=True) # Initialize mapping MLP, only if fine-tuning with IMLE for the exposure control self.mapping_mlp = None if args.models.mapping_mlp.use: self.mapping_mlp = get_mapping_mlp( args.models, use_amp=self.use_amp, amp_dtype=self.amp_dtype) # Initialize UNet if args.models.use_renderer: tx_opt = args.models.transformer feat_dim = tx_opt.embed.d_ff_out if tx_opt.embed.share_embed else tx_opt.embed.value.d_ff_out self.renderer = get_generator(args.models.renderer.generator, in_c=feat_dim, out_c=3, use_amp=self.use_amp, amp_dtype=self.amp_dtype) print("Renderer: ", count_parameters(self.renderer)) else: assert (args.models.transformer.embed.share_embed and args.models.transformer.embed.d_ff_out == 3) or \ (not args.models.transformer.embed.share_embed and args.models.transformer.embed.value.d_ff_out == 3), \ "Value embedding MLP should have output dim 3 if not using renderer" # Initialize background score and features if bkg_feat_opt.init_type == 'random': bkg_feat_init_func = torch.rand elif bkg_feat_opt.init_type == 'zeros': bkg_feat_init_func = torch.zeros elif bkg_feat_opt.init_type == 'ones': bkg_feat_init_func = torch.ones else: raise NotImplementedError( "Background init type [{:s}] is not found".format(bkg_feat_opt.init_type)) feat_dim = 3 self.bkg_feats = nn.Parameter(bkg_feat_init_func(bkg_feat_opt.seq_len, feat_dim, device=device) * 
bkg_feat_opt.init_scale, requires_grad=bkg_feat_opt.learnable) self.bkg_score = torch.tensor(bkg_feat_opt.constant, device=device, dtype=torch.float32).reshape(1) # Initialize point features self.use_pc_feats = pc_feat_opt.use_ink or pc_feat_opt.use_inq or pc_feat_opt.use_inv if self.use_pc_feats: self.pc_feats = nn.Parameter(torch.randn(points.shape[0], pc_feat_opt.dim), requires_grad=True) print("Point features: ", self.pc_feats.shape, self.pc_feats.min(), self.pc_feats.max(), self.pc_feats.mean(), self.pc_feats.std()) v_extra_dim = 0 k_extra_dim = 0 q_extra_dim = 0 if pc_feat_opt.use_inv: v_extra_dim = self.pc_feats.shape[-1] print("Using v_extra_dim: ", v_extra_dim) if pc_feat_opt.use_ink: k_extra_dim = self.pc_feats.shape[-1] print("Using k_extra_dim: ", k_extra_dim) if pc_feat_opt.use_inq: q_extra_dim = self.pc_feats.shape[-1] print("Using q_extra_dim: ", q_extra_dim) self.last_act = activation_func(args.models.last_act) # Initialize proximity attention layer(s)
transformer = get_transformer(args.models.transformer,
5
2023-12-08 19:51:42+00:00
8k
AdaCheng/EgoThink
models/instruct_blip/models/blip2_models/blip2.py
[ { "identifier": "dist_utils", "path": "models/instruct_blip/common/dist_utils.py", "snippet": "def setup_for_distributed(is_master):\n def print(*args, **kwargs):\ndef is_dist_avail_and_initialized():\ndef get_world_size():\ndef get_rank():\ndef is_main_process():\ndef init_distributed_mode(args):\ndef get_dist_info():\ndef main_process(func):\n def wrapper(*args, **kwargs):\ndef download_cached_file(url, check_hash=True, progress=False):\n def get_cached_file_path():" }, { "identifier": "download_cached_file", "path": "models/instruct_blip/common/dist_utils.py", "snippet": "def download_cached_file(url, check_hash=True, progress=False):\n \"\"\"\n Download a file from a URL and cache it locally. If the file already exists, it is not downloaded again.\n If distributed, only the main process downloads the file, and the other processes wait for the file to be downloaded.\n \"\"\"\n\n def get_cached_file_path():\n # a hack to sync the file path across processes\n parts = torch.hub.urlparse(url)\n filename = os.path.basename(parts.path)\n cached_file = os.path.join(timm_hub.get_cache_dir(), filename)\n\n return cached_file\n\n if is_main_process():\n timm_hub.download_cached_file(url, check_hash, progress)\n\n if is_dist_avail_and_initialized():\n dist.barrier()\n\n return get_cached_file_path()" }, { "identifier": "is_url", "path": "models/instruct_blip/common/utils.py", "snippet": "def is_url(url_or_filename):\n parsed = urlparse(url_or_filename)\n return parsed.scheme in (\"http\", \"https\")" }, { "identifier": "MetricLogger", "path": "models/instruct_blip/common/logger.py", "snippet": "class MetricLogger(object):\n def __init__(self, delimiter=\"\\t\"):\n self.meters = defaultdict(SmoothedValue)\n self.delimiter = delimiter\n\n def update(self, **kwargs):\n for k, v in kwargs.items():\n if isinstance(v, torch.Tensor):\n v = v.item()\n assert isinstance(v, (float, int))\n self.meters[k].update(v)\n\n def __getattr__(self, attr):\n if attr in self.meters:\n return self.meters[attr]\n if attr in self.__dict__:\n return self.__dict__[attr]\n raise AttributeError(\n \"'{}' object has no attribute '{}'\".format(type(self).__name__, attr)\n )\n\n def __str__(self):\n loss_str = []\n for name, meter in self.meters.items():\n loss_str.append(\"{}: {}\".format(name, str(meter)))\n return self.delimiter.join(loss_str)\n\n def global_avg(self):\n loss_str = []\n for name, meter in self.meters.items():\n loss_str.append(\"{}: {:.4f}\".format(name, meter.global_avg))\n return self.delimiter.join(loss_str)\n\n def synchronize_between_processes(self):\n for meter in self.meters.values():\n meter.synchronize_between_processes()\n\n def add_meter(self, name, meter):\n self.meters[name] = meter\n\n def log_every(self, iterable, print_freq, header=None):\n i = 0\n if not header:\n header = \"\"\n start_time = time.time()\n end = time.time()\n iter_time = SmoothedValue(fmt=\"{avg:.4f}\")\n data_time = SmoothedValue(fmt=\"{avg:.4f}\")\n space_fmt = \":\" + str(len(str(len(iterable)))) + \"d\"\n log_msg = [\n header,\n \"[{0\" + space_fmt + \"}/{1}]\",\n \"eta: {eta}\",\n \"{meters}\",\n \"time: {time}\",\n \"data: {data}\",\n ]\n if torch.cuda.is_available():\n log_msg.append(\"max mem: {memory:.0f}\")\n log_msg = self.delimiter.join(log_msg)\n MB = 1024.0 * 1024.0\n for obj in iterable:\n data_time.update(time.time() - end)\n yield obj\n iter_time.update(time.time() - end)\n if i % print_freq == 0 or i == len(iterable) - 1:\n eta_seconds = iter_time.global_avg * (len(iterable) - i)\n eta_string = 
str(datetime.timedelta(seconds=int(eta_seconds)))\n if torch.cuda.is_available():\n print(\n log_msg.format(\n i,\n len(iterable),\n eta=eta_string,\n meters=str(self),\n time=str(iter_time),\n data=str(data_time),\n memory=torch.cuda.max_memory_allocated() / MB,\n )\n )\n else:\n print(\n log_msg.format(\n i,\n len(iterable),\n eta=eta_string,\n meters=str(self),\n time=str(iter_time),\n data=str(data_time),\n )\n )\n i += 1\n end = time.time()\n total_time = time.time() - start_time\n total_time_str = str(datetime.timedelta(seconds=int(total_time)))\n print(\n \"{} Total time: {} ({:.4f} s / it)\".format(\n header, total_time_str, total_time / len(iterable)\n )\n )" }, { "identifier": "BaseModel", "path": "models/instruct_blip/models/base_model.py", "snippet": "class BaseModel(nn.Module):\n \"\"\"Base class for models.\"\"\"\n\n def __init__(self):\n super().__init__()\n\n @property\n def device(self):\n return list(self.parameters())[0].device\n\n def load_checkpoint(self, url_or_filename):\n \"\"\"\n Load from a finetuned checkpoint.\n\n This should expect no mismatch in the model keys and the checkpoint keys.\n \"\"\"\n\n if is_url(url_or_filename):\n cached_file = download_cached_file(\n url_or_filename, check_hash=False, progress=True\n )\n checkpoint = torch.load(cached_file, map_location=\"cpu\")\n elif os.path.isfile(url_or_filename):\n checkpoint = torch.load(url_or_filename, map_location=\"cpu\")\n else:\n raise RuntimeError(\"checkpoint url or path is invalid\")\n\n if \"model\" in checkpoint.keys():\n state_dict = checkpoint[\"model\"]\n else:\n state_dict = checkpoint\n\n msg = self.load_state_dict(state_dict, strict=False)\n\n logging.info(\"Missing keys {}\".format(msg.missing_keys))\n logging.info(\"load checkpoint from %s\" % url_or_filename)\n\n return msg\n\n @classmethod\n def from_pretrained(cls, model_type):\n \"\"\"\n Build a pretrained model from default configuration file, specified by model_type.\n\n Args:\n - model_type (str): model type, specifying architecture and checkpoints.\n\n Returns:\n - model (nn.Module): pretrained or finetuned model, depending on the configuration.\n \"\"\"\n model_cfg = OmegaConf.load(cls.default_config_path(model_type)).model\n model = cls.from_config(model_cfg)\n\n return model\n\n @classmethod\n def default_config_path(cls, model_type):\n assert (\n model_type in cls.PRETRAINED_MODEL_CONFIG_DICT\n ), \"Unknown model type {}\".format(model_type)\n return get_abs_path(cls.PRETRAINED_MODEL_CONFIG_DICT[model_type])\n\n def load_checkpoint_from_config(self, cfg, **kwargs):\n \"\"\"\n Load checkpoint as specified in the config file.\n\n If load_finetuned is True, load the finetuned model; otherwise, load the pretrained model.\n When loading the pretrained model, each task-specific architecture may define their\n own load_from_pretrained() method.\n \"\"\"\n load_finetuned = cfg.get(\"load_finetuned\", True)\n if load_finetuned:\n finetune_path = cfg.get(\"finetuned\", None)\n assert (\n finetune_path is not None\n ), \"Found load_finetuned is True, but finetune_path is None.\"\n self.load_checkpoint(url_or_filename=finetune_path)\n else:\n load_pretrained = cfg.get(\"load_pretrained\", True)\n if load_pretrained:\n # load pre-trained weights\n pretrain_path = cfg.get(\"pretrained\", None)\n assert \"Found load_finetuned is False, but pretrain_path is None.\"\n self.load_from_pretrained(url_or_filename=pretrain_path, **kwargs)\n\n\n def before_evaluation(self, **kwargs):\n pass\n\n def show_n_params(self, return_str=True):\n tot = 0\n 
for p in self.parameters():\n w = 1\n for x in p.shape:\n w *= x\n tot += w\n if return_str:\n if tot >= 1e6:\n return \"{:.1f}M\".format(tot / 1e6)\n else:\n return \"{:.1f}K\".format(tot / 1e3)\n else:\n return tot" }, { "identifier": "BertConfig", "path": "models/instruct_blip/models/blip2_models/Qformer.py", "snippet": "class BertEmbeddings(nn.Module):\nclass BertSelfAttention(nn.Module):\nclass BertSelfOutput(nn.Module):\nclass BertAttention(nn.Module):\nclass BertIntermediate(nn.Module):\nclass BertOutput(nn.Module):\nclass BertLayer(nn.Module):\nclass BertEncoder(nn.Module):\nclass BertPooler(nn.Module):\nclass BertPredictionHeadTransform(nn.Module):\nclass BertLMPredictionHead(nn.Module):\nclass BertOnlyMLMHead(nn.Module):\nclass BertPreTrainedModel(PreTrainedModel):\nclass BertModel(BertPreTrainedModel):\nclass BertLMHeadModel(BertPreTrainedModel):\nclass BertForMaskedLM(BertPreTrainedModel):\n def __init__(self, config):\n def forward(\n self,\n input_ids=None,\n position_ids=None,\n query_embeds=None,\n past_key_values_length=0,\n ):\n def __init__(self, config, is_cross_attention):\n def save_attn_gradients(self, attn_gradients):\n def get_attn_gradients(self):\n def save_attention_map(self, attention_map):\n def get_attention_map(self):\n def transpose_for_scores(self, x):\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_value=None,\n output_attentions=False,\n ):\n def __init__(self, config):\n def forward(self, hidden_states, input_tensor):\n def __init__(self, config, is_cross_attention=False):\n def prune_heads(self, heads):\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_value=None,\n output_attentions=False,\n ):\n def __init__(self, config):\n def forward(self, hidden_states):\n def __init__(self, config):\n def forward(self, hidden_states, input_tensor):\n def __init__(self, config, layer_num):\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_value=None,\n output_attentions=False,\n query_length=0,\n ):\n def feed_forward_chunk(self, attention_output):\n def feed_forward_chunk_query(self, attention_output):\n def __init__(self, config):\n def forward(\n self,\n hidden_states,\n attention_mask=None,\n head_mask=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_values=None,\n use_cache=None,\n output_attentions=False,\n output_hidden_states=False,\n return_dict=True,\n query_length=0,\n ):\n def create_custom_forward(module):\n def custom_forward(*inputs):\n def __init__(self, config):\n def forward(self, hidden_states):\n def __init__(self, config):\n def forward(self, hidden_states):\n def __init__(self, config):\n def forward(self, hidden_states):\n def __init__(self, config):\n def forward(self, sequence_output):\n def _init_weights(self, module):\n def __init__(self, config, add_pooling_layer=False):\n def get_input_embeddings(self):\n def set_input_embeddings(self, value):\n def _prune_heads(self, heads_to_prune):\n def get_extended_attention_mask(\n self,\n attention_mask: Tensor,\n input_shape: Tuple[int],\n device: device,\n is_decoder: bool,\n has_query: bool = False,\n ) -> Tensor:\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n position_ids=None,\n head_mask=None,\n query_embeds=None,\n 
encoder_hidden_states=None,\n encoder_attention_mask=None,\n past_key_values=None,\n use_cache=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n is_decoder=False,\n ):\n def __init__(self, config):\n def get_output_embeddings(self):\n def set_output_embeddings(self, new_embeddings):\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n position_ids=None,\n head_mask=None,\n query_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n labels=None,\n past_key_values=None,\n use_cache=True,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n return_logits=False,\n is_decoder=True,\n reduction=\"mean\",\n ):\n def prepare_inputs_for_generation(\n self, input_ids, query_embeds, past=None, attention_mask=None, **model_kwargs\n ):\n def _reorder_cache(self, past, beam_idx):\n def __init__(self, config):\n def get_output_embeddings(self):\n def set_output_embeddings(self, new_embeddings):\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n position_ids=None,\n head_mask=None,\n query_embeds=None,\n encoder_hidden_states=None,\n encoder_attention_mask=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n return_logits=False,\n is_decoder=False,\n ):" }, { "identifier": "create_eva_vit_g", "path": "models/instruct_blip/models/eva_vit.py", "snippet": "def create_eva_vit_g(img_size=224,drop_path_rate=0.4,use_checkpoint=False,precision=\"fp16\"):\n model = VisionTransformer(\n img_size=img_size,\n patch_size=14,\n use_mean_pooling=False,\n embed_dim=1408,\n depth=39,\n num_heads=1408//88,\n mlp_ratio=4.3637,\n qkv_bias=True,\n drop_path_rate=drop_path_rate,\n norm_layer=partial(nn.LayerNorm, eps=1e-6),\n use_checkpoint=use_checkpoint,\n ) \n url = \"https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP2/eva_vit_g.pth\"\n cached_file = download_cached_file(\n url, check_hash=False, progress=True\n )\n state_dict = torch.load(cached_file, map_location=\"cpu\") \n interpolate_pos_embed(model,state_dict)\n \n incompatible_keys = model.load_state_dict(state_dict, strict=False)\n# print(incompatible_keys)\n \n if precision == \"fp16\":\n# model.to(\"cuda\") \n convert_weights_to_fp16(model)\n return model" }, { "identifier": "create_clip_vit_L", "path": "models/instruct_blip/models/clip_vit.py", "snippet": "def create_clip_vit_L(img_size=224,use_checkpoint=False,precision=\"fp16\"):\n model = VisionTransformer(\n input_resolution=img_size,\n patch_size=14,\n width=1024,\n layers=23,\n heads=16,\n use_grad_checkpointing=use_checkpoint,\n ) \n url = \"https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP2/clip_vit_L.pth\"\n cached_file = download_cached_file(\n url, check_hash=False, progress=True\n )\n state_dict = torch.load(cached_file, map_location=\"cpu\") \n interpolate_pos_embed(model,state_dict)\n \n incompatible_keys = model.load_state_dict(state_dict, strict=False)\n # print(incompatible_keys)\n \n if precision == \"fp16\":\n convert_weights_to_fp16(model)\n return model" } ]
import contextlib
import logging
import os
import time
import datetime
import torch
import torch.nn as nn
import torch.distributed as dist
import torch.nn.functional as F
import spacy
from ...common import dist_utils as dist_utils
from ...common.dist_utils import download_cached_file
from ...common.utils import is_url
from ...common.logger import MetricLogger
from ..base_model import BaseModel
from ..blip2_models.Qformer import BertConfig, BertLMHeadModel
from ..eva_vit import create_eva_vit_g
from ..clip_vit import create_clip_vit_L
from transformers import BertTokenizer
4,090
""" Copyright (c) 2023, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """
""" Copyright (c) 2023, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """
class Blip2Base(BaseModel):
4
2023-12-05 14:17:17+00:00
8k
3dlg-hcvc/cage
systems/base.py
[ { "identifier": "SaverMixin", "path": "utils/savermixins.py", "snippet": "class SaverMixin():\n def set_save_dir(self, stage):\n self.hparams.save_dir = os.path.join(self.logger.log_dir, 'images', stage) \n os.makedirs(self.hparams.save_dir, exist_ok=True)\n\n @property\n def save_dir(self):\n return self.hparams.save_dir\n \n def convert_format(self, data):\n if isinstance(data, np.ndarray):\n return data\n elif isinstance(data, torch.Tensor):\n return data.cpu().numpy()\n elif isinstance(data, list):\n return [self.convert_data(d) for d in data]\n elif isinstance(data, dict):\n return {k: self.convert_data(v) for k, v in data.items()}\n else:\n raise TypeError('Data must be in type numpy.ndarray, torch.Tensor, list or dict, getting', type(data))\n \n def get_save_path(self, filename):\n save_path = os.path.join(self.save_dir, filename)\n os.makedirs(os.path.dirname(save_path), exist_ok=True)\n return save_path\n \n def save_rgb_image(self, filename, img):\n imageio.imwrite(self.get_save_path(filename), img)\n \n def save_rgb_video(self, filename, stage='fit', filter=None):\n img_dir = os.path.join(self.logger.log_dir, 'images', stage)\n \n writer_graph = imageio.get_writer(os.path.join(img_dir, filename), fps=1)\n\n for file in sorted(os.listdir(img_dir)):\n if file.endswith('.png') and 'gt' not in file:\n if filter is not None:\n if filter in file:\n writer_graph.append_data(imageio.imread(os.path.join(img_dir, file)))\n else:\n writer_graph.append_data(imageio.imread(os.path.join(img_dir, file)))\n\n writer_graph.close()\n \n def save_json(self, filename, data):\n save_path = self.get_save_path(filename)\n with open(save_path, 'w') as f:\n json.dump(data, f)" }, { "identifier": "label_ref", "path": "utils/refs.py", "snippet": "" }, { "identifier": "viz_graph", "path": "utils/plot.py", "snippet": "def viz_graph(info_dict, res=256):\n '''\n Function to plot the directed graph\n\n Args:\n - info_dict (dict): output json containing the graph information\n - res (int): resolution of the image\n\n Returns:\n - img_arr (np.array): image array\n '''\n # build tree\n tree = info_dict['diffuse_tree']\n edges = []\n for node in tree:\n edges += [(node['id'], child) for child in node['children']]\n G = nx.DiGraph()\n G.add_edges_from(edges)\n\n # plot tree\n plt.figure(figsize=(res/100, res/100))\n\n colors = get_color(graph_color_ref, len(tree))\n pos = nx.nx_agraph.graphviz_layout(G, prog=\"twopi\", args=\"\")\n node_order = sorted(G.nodes())\n nx.draw(G, pos, node_color=colors, nodelist=node_order, edge_color='k', with_labels=False)\n \n buf = BytesIO()\n plt.savefig(buf, format=\"png\", dpi=100)\n buf.seek(0)\n img = Image.open(buf)\n img_arr = np.asarray(img)\n buf.close()\n plt.clf()\n plt.close()\n return img_arr[:, :, :3]" }, { "identifier": "make_grid", "path": "utils/plot.py", "snippet": "def make_grid(images, cols=5):\n \"\"\"\n Arrange list of images into a N x cols grid.\n \n Args:\n - images (list): List of Numpy arrays representing the images.\n - cols (int): Number of columns for the grid.\n \n Returns:\n - grid (numpy array): Numpy array representing the image grid.\n \"\"\"\n # Determine the dimensions of each image\n img_h, img_w, _ = images[0].shape\n rows = len(images) // cols\n \n # Initialize a blank canvas\n grid = np.zeros((rows * img_h, cols * img_w, 3), dtype=images[0].dtype)\n \n # Place each image onto the grid\n for idx, img in enumerate(images):\n y = (idx // cols) * img_h\n x = (idx % cols) * img_w\n grid[y: y + img_h, x: x + img_w] = img\n \n return grid" }, { 
"identifier": "add_text", "path": "utils/plot.py", "snippet": "def add_text(text, imgarr):\n '''\n Function to add text to image\n\n Args:\n - text (str): text to add\n - imgarr (np.array): image array\n\n Returns:\n - img (np.array): image array with text\n '''\n img = Image.fromarray(imgarr)\n I = ImageDraw.Draw(img)\n I.text((10, 10), text, fill='black')\n return np.asarray(img)" }, { "identifier": "rescale_axis", "path": "utils/render.py", "snippet": "def rescale_axis(jtype, axis_d, axis_o, box_center):\n '''\n Function to rescale the axis for rendering\n \n Args:\n - jtype (int): joint type\n - axis_d (np.array): axis direction\n - axis_o (np.array): axis origin\n - box_center (np.array): bounding box center\n\n Returns:\n - center (np.array): rescaled axis origin\n - axis_d (np.array): rescaled axis direction\n '''\n if jtype == 0 or jtype == 1:\n return [0., 0., 0.], [0., 0., 0.]\n if jtype == 3 or jtype == 4:\n center = box_center\n else:\n center = axis_o + np.dot(axis_d, box_center-axis_o) * axis_d\n return center.tolist(), axis_d.tolist()" }, { "identifier": "draw_boxes_axiss_anim", "path": "utils/render.py", "snippet": "def draw_boxes_axiss_anim(aabbs_0, aabbs_1, axiss, mode='graph', resolution=256, types=None):\n '''\n Function to draw the 3D bounding boxes and axes of the two frames\n\n Args:\n aabbs_0: list of trimesh objects for the bounding box of each part in the resting state\n aabbs_1: list of trimesh objects for the bounding box of each part in the open state\n axiss: list of trimesh objects for the axis of each part\n mode: \n 'graph' using palette corresponding to graph node, \n 'jtype' using palette corresponding to joint type, \n 'semantic' using palette corresponding to semantic label\n resolution: resolution of the rendered image\n types: ids corresponding to each joint type or semantic label, if mode is 'jtype' or 'semantic'\n '''\n n_parts = len(aabbs_0)\n ren_aabbs_0 = []\n ren_aabbs_1 = []\n ren_axiss = []\n if mode == 'graph':\n palette = graph_color_ref\n # Add meshes to the scene\n for i in range(n_parts):\n color = get_color_from_palette(palette, i)\n aabb_0 = pyrender.Mesh.from_trimesh(aabbs_0[i], smooth=False)\n aabb_0.primitives[0].color_0 = color.repeat(aabb_0.primitives[0].positions.shape[0], axis=0)\n ren_aabbs_0.append(aabb_0)\n aabb_1 = pyrender.Mesh.from_trimesh(aabbs_1[i], smooth=False)\n aabb_1.primitives[0].color_0 = color.repeat(aabb_1.primitives[0].positions.shape[0], axis=0)\n ren_aabbs_1.append(aabb_1)\n if axiss[i] is not None:\n axis = pyrender.Mesh.from_trimesh(axiss[i], smooth=False)\n axis.primitives[0].color_0 = color.repeat(axis.primitives[0].positions.shape[0], axis=0)\n ren_axiss.append(axis)\n else:\n ren_axiss.append(None) \n elif mode == 'jtype' or mode == 'semantic':\n assert types is not None\n palette = joint_color_ref if mode == 'jtype' else semantic_color_ref\n # Add meshes to the scene\n for i in range(n_parts):\n color = get_color_from_palette(palette, types[i])\n aabb_0 = pyrender.Mesh.from_trimesh(aabbs_0[i], smooth=False)\n aabb_0.primitives[0].color_0 = color.repeat(aabb_0.primitives[0].positions.shape[0], axis=0)\n ren_aabbs_0.append(aabb_0)\n aabb_1 = pyrender.Mesh.from_trimesh(aabbs_1[i], smooth=False)\n aabb_1.primitives[0].color_0 = color.repeat(aabb_1.primitives[0].positions.shape[0], axis=0)\n ren_aabbs_1.append(aabb_1)\n\n if axiss[i] is not None:\n axis = pyrender.Mesh.from_trimesh(axiss[i], smooth=False)\n ren_axiss.append(axis)\n else:\n ren_axiss.append(None)\n else:\n raise ValueError('mode must be 
either graph or type')\n\n img0 = render_anim_parts(ren_aabbs_0, ren_axiss, resolution=resolution)\n img1 = render_anim_parts(ren_aabbs_1, ren_axiss, resolution=resolution)\n return np.concatenate([img0, img1], axis=1)" }, { "identifier": "get_bbox_mesh_pair", "path": "utils/render.py", "snippet": "def get_bbox_mesh_pair(center, size, radius=0.01, jtype=None, jrange=None, axis_d=None, axis_o=None):\n '''\n Function to get the bounding box mesh pair\n\n Args:\n - center (np.array): bounding box center\n - size (np.array): bounding box size\n - radius (float): radius of the cylinder\n - jtype (int): joint type\n - jrange (list): joint range\n - axis_d (np.array): axis direction\n - axis_o (np.array): axis origin\n\n Returns:\n - trimesh_box (trimesh object): trimesh object for the bbox at resting state\n - trimesh_box_anim (trimesh object): trimesh object for the bbox at opening state\n '''\n\n size = np.clip(size, a_max=3, a_min=0.005)\n center = np.clip(center, a_max=3, a_min=-3)\n\n line_box = o3d.geometry.TriangleMesh()\n z_cylinder = o3d.geometry.TriangleMesh.create_cylinder(radius=radius, height=size[2])\n y_cylinder = o3d.geometry.TriangleMesh.create_cylinder(radius=radius, height=size[1])\n R_y = get_rotation_axis_angle(np.array([1., 0., 0.]), np.pi / 2)\n y_cylinder.rotate(R_y, center=(0, 0, 0))\n x_cylinder = o3d.geometry.TriangleMesh.create_cylinder(radius=radius, height=size[0])\n R_x = get_rotation_axis_angle(np.array([0., 1., 0.]), np.pi / 2)\n x_cylinder.rotate(R_x, center=(0, 0, 0))\n \n \n z1 = deepcopy(z_cylinder)\n z1.translate(np.array([-size[0] / 2, size[1] / 2, 0.]))\n line_box += z1.translate(center[:3])\n z2 = deepcopy(z_cylinder)\n z2.translate(np.array([size[0] / 2, size[1] / 2, 0.]))\n line_box += z2.translate(center[:3])\n z3 = deepcopy(z_cylinder)\n z3.translate(np.array([-size[0] / 2, -size[1] / 2, 0.]))\n line_box += z3.translate(center[:3])\n z4 = deepcopy(z_cylinder)\n z4.translate(np.array([size[0] / 2, -size[1] / 2, 0.]))\n line_box += z4.translate(center[:3])\n \n y1 = deepcopy(y_cylinder)\n y1.translate(np.array([-size[0] / 2, 0., size[2] / 2]))\n line_box += y1.translate(center[:3])\n y2 = deepcopy(y_cylinder)\n y2.translate(np.array([size[0] / 2, 0., size[2] / 2]))\n line_box += y2.translate(center[:3])\n y3 = deepcopy(y_cylinder)\n y3.translate(np.array([-size[0] / 2, 0., -size[2] / 2]))\n line_box += y3.translate(center[:3])\n y4 = deepcopy(y_cylinder)\n y4.translate(np.array([size[0] / 2, 0., -size[2] / 2]))\n line_box += y4.translate(center[:3])\n \n x1 = deepcopy(x_cylinder)\n x1.translate(np.array([0., -size[1] / 2, size[2] / 2]))\n line_box += x1.translate(center[:3])\n x2 = deepcopy(x_cylinder)\n x2.translate(np.array([0., size[1] / 2, size[2] / 2]))\n line_box += x2.translate(center[:3])\n x3 = deepcopy(x_cylinder)\n x3.translate(np.array([0., -size[1] / 2, -size[2] / 2]))\n line_box += x3.translate(center[:3])\n x4 = deepcopy(x_cylinder)\n x4.translate(np.array([0., size[1] / 2, -size[2] / 2]))\n line_box += x4.translate(center[:3])\n\n # transform\n line_box_anim = deepcopy(line_box)\n if jtype == 2: # revolute\n theta = np.deg2rad(jrange[0])\n line_box_anim.translate(-axis_o)\n R = get_rotation_axis_angle(axis_d, theta)\n line_box_anim.rotate(R, center=(0, 0, 0))\n line_box_anim.translate(axis_o)\n elif jtype == 3: # prismatic\n dist = np.array(jrange[1])\n line_box_anim.translate(axis_d * dist)\n elif jtype == 4: # screw\n dist = np.array(jrange[1])\n theta = 0.25 * np.pi\n R = get_rotation_axis_angle(axis_d, theta)\n 
line_box_anim.translate(-axis_o)\n line_box_anim.rotate(R, center=(0, 0, 0))\n line_box_anim.translate(axis_o)\n line_box_anim.translate(axis_d * dist)\n elif jtype == 5: # continuous\n theta = 0.25 * np.pi\n R = get_rotation_axis_angle(axis_d, theta)\n line_box_anim.translate(-axis_o)\n line_box_anim.rotate(R, center=(0, 0, 0))\n line_box_anim.translate(axis_o)\n \n vertices = np.asarray(line_box.vertices)\n faces = np.asarray(line_box.triangles)\n trimesh_box = trimesh.Trimesh(vertices=vertices, faces=faces)\n trimesh_box.visual.vertex_colors = np.array([0.0, 1.0, 1.0, 1.0])\n \n vertices_anim = np.asarray(line_box_anim.vertices)\n faces_anim = np.asarray(line_box_anim.triangles)\n trimesh_box_anim = trimesh.Trimesh(vertices=vertices_anim, faces=faces_anim)\n trimesh_box_anim.visual.vertex_colors = np.array([0.0, 1.0, 1.0, 1.0])\n \n return trimesh_box, trimesh_box_anim" }, { "identifier": "get_axis_mesh", "path": "utils/render.py", "snippet": "def get_axis_mesh(k, axis_o, bbox_center, joint_type):\n '''\n Function to get the axis mesh\n\n Args:\n - k (np.array): axis direction\n - center (np.array): axis origin\n - bbox_center (np.array): bounding box center\n - joint_type (int): joint type\n '''\n if joint_type == 0 or joint_type == 1 or np.linalg.norm(k) == 0. :\n return None\n \n k = k / np.linalg.norm(k)\n\n if joint_type == 3 or joint_type == 4: # prismatic or screw\n axis_o = bbox_center\n else: # revolute or continuous\n axis_o = axis_o + np.dot(k, bbox_center-axis_o) * k\n axis = o3d.geometry.TriangleMesh.create_arrow(cylinder_radius=0.015, cone_radius=0.03, cylinder_height=1.0, cone_height=0.08)\n arrow = np.array([0., 0., 1.], dtype=np.float32)\n n = np.cross(arrow, k) \n rad = np.arccos(np.dot(arrow, k))\n R_arrow = get_rotation_axis_angle(n, rad)\n axis.rotate(R_arrow, center=(0, 0, 0))\n axis.translate(axis_o[:3])\n axis.compute_vertex_normals()\n vertices = np.asarray(axis.vertices)\n faces = np.asarray(axis.triangles)\n trimesh_axis = trimesh.Trimesh(vertices=vertices, faces=faces)\n trimesh_axis.visual.vertex_colors = np.array([0.5, 0.5, 0.5, 1.0])\n return trimesh_axis" } ]
import torch
import models
import numpy as np
import lightning.pytorch as pl
from diffusers import DDPMScheduler
from utils.savermixins import SaverMixin
from utils.refs import label_ref, joint_ref
from utils.plot import viz_graph, make_grid, add_text
from utils.render import rescale_axis, draw_boxes_axiss_anim, get_bbox_mesh_pair, get_axis_mesh
5,940
aabb_min = self.convert_format(x[:, 3:6]) center = (aabb_max + aabb_min) / 2. size = (aabb_max - aabb_min).clip(min=1e-3) j_type = torch.mean(x[:, 6:12], dim=1) j_type = self.convert_format((j_type+0.5) * 5).clip(min=1., max=5.).round() axis_d = self.convert_format(x[:, 12:15]) axis_d = axis_d / (np.linalg.norm(axis_d, axis=1, keepdims=True) + np.finfo(float).eps) axis_o = self.convert_format(x[:, 15:18]) j_range = (x[:, 18:20] + x[:, 20:22] + x[:, 22:24]) / 3 j_range = self.convert_format(j_range).clip(min=-1., max=1.) j_range[:, 0] = j_range[:, 0] * 360 j_range[:, 1] = j_range[:, 1] label = torch.mean(x[:, 24:30], dim=1) label = self.convert_format((label+0.8) * 5).clip(min=0., max=7.).round() return { 'center': center, 'size': size, 'type': j_type, 'axis_d': axis_d, 'axis_o': axis_o, 'range': j_range, 'label': label } def convert_json_graph_only(self, c, idx): out = {'diffuse_tree': []} n_nodes = c['n_nodes'][idx].item() par = c['parents'][idx].cpu().numpy().tolist() adj = c['adj'][idx].cpu().numpy() np.fill_diagonal(adj, 0) for i in range(n_nodes): node = {'id': i} node['parent'] = int(par[i]) node['children'] = [intchild for child in np.where(adj[i] == 1)[0] if child != par[i]] out['diffuse_tree'].append(node) return out def convert_json(self, x, c, idx): n_nodes = c['n_nodes'][idx].item() par = c['parents'][idx].cpu().numpy().tolist() adj = c['adj'][idx].cpu().numpy() np.fill_diagonal(adj, 0) # convert the data to original range data = self.convert_data_range(x) # convert to json format out = {'diffuse_tree': []} out['meta'] = { 'obj_cat': c['obj_cat'][idx], 'tree_hash': c['tree_hash'][idx] } for i in range(n_nodes): node = {'id': i} node['name'] = label_ref['bwd'][int(data['label'][i].item())] node['parent'] = int(par[i]) node['children'] = [int(child) for child in np.where(adj[i] == 1)[0] if child != par[i]] node['aabb'] = {} node['aabb']['center'] = data['center'][i].tolist() node['aabb']['size'] = data['size'][i].tolist() node['joint'] = {} node['joint']['type'] = joint_ref['bwd'][int(data['type'][i].item())] if node['joint']['type'] == 'fixed': node['joint']['range'] = [0., 0.] elif node['joint']['type'] == 'revolute': node['joint']['range'] = [0., float(data['range'][i][0])] elif node['joint']['type'] == 'continuous': node['joint']['range'] = [0., 360.] 
elif node['joint']['type'] == 'prismatic' or node['joint']['type'] == 'screw': node['joint']['range'] = [0., float(data['range'][i][1])] node['joint']['axis'] = {} # relocate the axis to visualize well axis_o, axis_d = rescale_axis(int(data['type'][i].item()), data['axis_d'][i], data['axis_o'][i], data['center'][i]) node['joint']['axis']['direction'] = axis_d node['joint']['axis']['origin'] = axis_o out['diffuse_tree'].append(node) return out # ------------------------------- visualizations ------------------------------- # def prepare_meshes(self, info_dict): ''' Function to prepare the bbox and axis meshes for visualization Args: - info_dict (dict): output json containing the graph information ''' tree = info_dict['diffuse_tree'] bbox_0, bbox_1, axiss, labels, jtypes = [], [], [], [], [] root_id = 0 # get root id for node in tree: if node['parent'] == -1: root_id = node['id'] for node in tree: # retrieve info box_cen = np.array(node['aabb']['center']) box_size = np.array(node['aabb']['size']) jrange = node['joint']['range'] jtype = node['joint']['type'] axis_d = np.array(node['joint']['axis']['direction']) axis_o = np.array(node['joint']['axis']['origin']) label = label_ref['fwd'][node['name']] jtype_id = joint_ref['fwd'][node['joint']['type']] # construct meshes for bbox if node['id'] == root_id or node['parent'] == root_id: # no transform bb_0, bb_1 = get_bbox_mesh_pair(box_cen, box_size, jtype=jtype_id, jrange=jrange, axis_d=axis_d, axis_o=axis_o) else: parent_id = node['parent'] jrange_p = tree[parent_id]['joint']['range'] jtype_p = tree[parent_id]['joint']['type'] jtype_p_id = joint_ref['fwd'][jtype_p] axis_d_p = np.array(tree[parent_id]['joint']['axis']['direction']) axis_o_p = np.array(tree[parent_id]['joint']['axis']['origin']) bb_0, bb_1 = get_bbox_mesh_pair(box_cen, box_size, jtype=jtype_p_id, jrange=jrange_p, axis_d=axis_d_p, axis_o=axis_o_p) # construct mesh for axis (the axis is not supporting transform for now)
class BaseSystem(pl.LightningModule, SaverMixin): def __init__(self, hparams): super().__init__() self.hparams.update(hparams) self.model = models.make(hparams.model.name, hparams.model) self.scheduler = DDPMScheduler(**self.hparams.scheduler.config) self.save_hyperparameters() def setup(self, stage: str): self.set_save_dir(stage) # config the logger dir for images def configure_optimizers(self): raise NotImplementedError def training_step(self, batch, batch_idx): raise NotImplementedError def validation_step(self, batch, batch_idx): raise NotImplementedError def predict_step(self, batch, batch_idx, dataloader_idx=None): raise NotImplementedError # ------------------------------- data converters ------------------------------- # def convert_data_range(self, x): x = x.reshape(-1, 30) # (K, 30) aabb_max = self.convert_format(x[:, 0:3]) aabb_min = self.convert_format(x[:, 3:6]) center = (aabb_max + aabb_min) / 2. size = (aabb_max - aabb_min).clip(min=1e-3) j_type = torch.mean(x[:, 6:12], dim=1) j_type = self.convert_format((j_type+0.5) * 5).clip(min=1., max=5.).round() axis_d = self.convert_format(x[:, 12:15]) axis_d = axis_d / (np.linalg.norm(axis_d, axis=1, keepdims=True) + np.finfo(float).eps) axis_o = self.convert_format(x[:, 15:18]) j_range = (x[:, 18:20] + x[:, 20:22] + x[:, 22:24]) / 3 j_range = self.convert_format(j_range).clip(min=-1., max=1.) j_range[:, 0] = j_range[:, 0] * 360 j_range[:, 1] = j_range[:, 1] label = torch.mean(x[:, 24:30], dim=1) label = self.convert_format((label+0.8) * 5).clip(min=0., max=7.).round() return { 'center': center, 'size': size, 'type': j_type, 'axis_d': axis_d, 'axis_o': axis_o, 'range': j_range, 'label': label } def convert_json_graph_only(self, c, idx): out = {'diffuse_tree': []} n_nodes = c['n_nodes'][idx].item() par = c['parents'][idx].cpu().numpy().tolist() adj = c['adj'][idx].cpu().numpy() np.fill_diagonal(adj, 0) for i in range(n_nodes): node = {'id': i} node['parent'] = int(par[i]) node['children'] = [intchild for child in np.where(adj[i] == 1)[0] if child != par[i]] out['diffuse_tree'].append(node) return out def convert_json(self, x, c, idx): n_nodes = c['n_nodes'][idx].item() par = c['parents'][idx].cpu().numpy().tolist() adj = c['adj'][idx].cpu().numpy() np.fill_diagonal(adj, 0) # convert the data to original range data = self.convert_data_range(x) # convert to json format out = {'diffuse_tree': []} out['meta'] = { 'obj_cat': c['obj_cat'][idx], 'tree_hash': c['tree_hash'][idx] } for i in range(n_nodes): node = {'id': i} node['name'] = label_ref['bwd'][int(data['label'][i].item())] node['parent'] = int(par[i]) node['children'] = [int(child) for child in np.where(adj[i] == 1)[0] if child != par[i]] node['aabb'] = {} node['aabb']['center'] = data['center'][i].tolist() node['aabb']['size'] = data['size'][i].tolist() node['joint'] = {} node['joint']['type'] = joint_ref['bwd'][int(data['type'][i].item())] if node['joint']['type'] == 'fixed': node['joint']['range'] = [0., 0.] elif node['joint']['type'] == 'revolute': node['joint']['range'] = [0., float(data['range'][i][0])] elif node['joint']['type'] == 'continuous': node['joint']['range'] = [0., 360.] 
elif node['joint']['type'] == 'prismatic' or node['joint']['type'] == 'screw': node['joint']['range'] = [0., float(data['range'][i][1])] node['joint']['axis'] = {} # relocate the axis to visualize well axis_o, axis_d = rescale_axis(int(data['type'][i].item()), data['axis_d'][i], data['axis_o'][i], data['center'][i]) node['joint']['axis']['direction'] = axis_d node['joint']['axis']['origin'] = axis_o out['diffuse_tree'].append(node) return out # ------------------------------- visualizations ------------------------------- # def prepare_meshes(self, info_dict): ''' Function to prepare the bbox and axis meshes for visualization Args: - info_dict (dict): output json containing the graph information ''' tree = info_dict['diffuse_tree'] bbox_0, bbox_1, axiss, labels, jtypes = [], [], [], [], [] root_id = 0 # get root id for node in tree: if node['parent'] == -1: root_id = node['id'] for node in tree: # retrieve info box_cen = np.array(node['aabb']['center']) box_size = np.array(node['aabb']['size']) jrange = node['joint']['range'] jtype = node['joint']['type'] axis_d = np.array(node['joint']['axis']['direction']) axis_o = np.array(node['joint']['axis']['origin']) label = label_ref['fwd'][node['name']] jtype_id = joint_ref['fwd'][node['joint']['type']] # construct meshes for bbox if node['id'] == root_id or node['parent'] == root_id: # no transform bb_0, bb_1 = get_bbox_mesh_pair(box_cen, box_size, jtype=jtype_id, jrange=jrange, axis_d=axis_d, axis_o=axis_o) else: parent_id = node['parent'] jrange_p = tree[parent_id]['joint']['range'] jtype_p = tree[parent_id]['joint']['type'] jtype_p_id = joint_ref['fwd'][jtype_p] axis_d_p = np.array(tree[parent_id]['joint']['axis']['direction']) axis_o_p = np.array(tree[parent_id]['joint']['axis']['origin']) bb_0, bb_1 = get_bbox_mesh_pair(box_cen, box_size, jtype=jtype_p_id, jrange=jrange_p, axis_d=axis_d_p, axis_o=axis_o_p) # construct mesh for axis (the axis is not supporting transform for now)
axis = get_axis_mesh(axis_d, axis_o, box_cen, jtype)
8
2023-12-06 23:08:41+00:00
8k
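The convert_data_range method in the record above decodes each 30-dimensional node vector into box, joint, and label attributes by fixed slicing. A minimal sketch of that slice layout on a dummy vector (the helper name describe_node_vector and the random values are invented for illustration; the offsets follow the slicing shown in the record):

import numpy as np

def describe_node_vector(v):
    # v: one 30-dim node vector, laid out as in convert_data_range (hypothetical helper)
    return {
        'aabb_max': v[0:3],      # box max corner
        'aabb_min': v[3:6],      # box min corner
        'type_raw': v[6:12],     # averaged, rescaled and rounded to a joint-type id
        'axis_d': v[12:15],      # joint axis direction (normalized downstream)
        'axis_o': v[15:18],      # joint axis origin
        'range_raw': v[18:24],   # three 2-dim copies, averaged into a joint range
        'label_raw': v[24:30],   # averaged, rescaled and rounded to a semantic label id
    }

vec = np.random.uniform(-1., 1., size=30)  # dummy network output for one node
print({k: np.round(val, 2) for k, val in describe_node_vector(vec).items()})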
duxiaodan/intrinsic-lora
augunet_diode_pseudo_depth.py
[ { "identifier": "plot_depth_map", "path": "diode/diode.py", "snippet": "def plot_depth_map(dm, validity_mask):\n validity_mask = validity_mask > 0\n MIN_DEPTH = 0.5\n MAX_DEPTH = min(300, np.percentile(dm, 99))\n dm = np.clip(dm, MIN_DEPTH, MAX_DEPTH)\n\n dm = (dm - np.min(dm)) / np.ptp(dm)\n dm = 1-dm\n dm = np.stack([dm]*3,axis=-1)\n \n dm[np.where(validity_mask == False)] = 0\n dm = Image.fromarray(np.uint8(dm[:,:,:3]*255)).convert('RGB')\n mask = Image.fromarray(np.uint8(validity_mask*255))\n return dm, mask" }, { "identifier": "check_and_tuplize_tokens", "path": "diode/diode.py", "snippet": "def check_and_tuplize_tokens(tokens, valid_tokens):\n if not isinstance(tokens, (tuple, list)):\n tokens = (tokens, )\n for split in tokens:\n assert split in valid_tokens\n return tokens" }, { "identifier": "enumerate_paths", "path": "diode/diode.py", "snippet": "def enumerate_paths(src):\n '''flatten out a nested dictionary into an iterable\n DIODE metadata is a nested dictionary;\n One could easily query a particular scene and scan, but sequentially\n enumerating files in a nested dictionary is troublesome. This function\n recursively traces out and aggregates the leaves of a tree.\n '''\n if isinstance(src, list):\n return src\n elif isinstance(src, dict):\n acc = []\n for k, v in src.items():\n _sub_paths = enumerate_paths(v)\n _sub_paths = list(map(lambda x: osp.join(k, x), _sub_paths))\n acc.append(_sub_paths)\n return list(chain.from_iterable(acc))\n else:\n raise ValueError('do not accept data type {}'.format(type(src)))" }, { "identifier": "_VALID_SPLITS", "path": "diode/diode.py", "snippet": "_VALID_SPLITS = ('train', 'val', 'test')" }, { "identifier": "_VALID_SCENE_TYPES", "path": "diode/diode.py", "snippet": "_VALID_SCENE_TYPES = ('indoors', 'outdoor')" }, { "identifier": "new_call", "path": "rescale_cfg_pipeline_forward.py", "snippet": "@torch.no_grad()\ndef new_call(\n self,\n prompt: Union[str, List[str]] = None,\n image: Union[\n torch.FloatTensor,\n PIL.Image.Image,\n np.ndarray,\n List[torch.FloatTensor],\n List[PIL.Image.Image],\n List[np.ndarray],\n ] = None,\n num_inference_steps: int = 100,\n guidance_scale: float = 7.5,\n image_guidance_scale: float = 1.5,\n negative_prompt: Optional[Union[str, List[str]]] = None,\n num_images_per_prompt: Optional[int] = 1,\n eta: float = 0.0,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n latents: Optional[torch.FloatTensor] = None,\n prompt_embeds: Optional[torch.FloatTensor] = None,\n negative_prompt_embeds: Optional[torch.FloatTensor] = None,\n output_type: Optional[str] = \"pil\",\n return_dict: bool = True,\n callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,\n callback_steps: int = 1,\n guidance_rescale: float = 0.0,\n):\n r\"\"\"\n The call function to the pipeline for generation.\n\n Args:\n prompt (`str` or `List[str]`, *optional*):\n The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.\n image (`torch.FloatTensor` `np.ndarray`, `PIL.Image.Image`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`):\n `Image` or tensor representing an image batch to be repainted according to `prompt`. Can also accept\n image latents as `image`, but if passing latents directly it is not encoded again.\n num_inference_steps (`int`, *optional*, defaults to 100):\n The number of denoising steps. 
More denoising steps usually lead to a higher quality image at the\n expense of slower inference.\n guidance_scale (`float`, *optional*, defaults to 7.5):\n A higher guidance scale value encourages the model to generate images closely linked to the text\n `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.\n image_guidance_scale (`float`, *optional*, defaults to 1.5):\n Push the generated image towards the inital `image`. Image guidance scale is enabled by setting\n `image_guidance_scale > 1`. Higher image guidance scale encourages generated images that are closely\n linked to the source `image`, usually at the expense of lower image quality. This pipeline requires a\n value of at least `1`.\n negative_prompt (`str` or `List[str]`, *optional*):\n The prompt or prompts to guide what to not include in image generation. If not defined, you need to\n pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).\n num_images_per_prompt (`int`, *optional*, defaults to 1):\n The number of images to generate per prompt.\n eta (`float`, *optional*, defaults to 0.0):\n Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies\n to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.\n generator (`torch.Generator`, *optional*):\n A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make\n generation deterministic.\n latents (`torch.FloatTensor`, *optional*):\n Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image\n generation. Can be used to tweak the same generation with different prompts. If not provided, a latents\n tensor is generated by sampling using the supplied random `generator`.\n prompt_embeds (`torch.FloatTensor`, *optional*):\n Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not\n provided, text embeddings are generated from the `prompt` input argument.\n negative_prompt_embeds (`torch.FloatTensor`, *optional*):\n Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If\n not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.\n output_type (`str`, *optional*, defaults to `\"pil\"`):\n The output format of the generated image. Choose between `PIL.Image` or `np.array`.\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a\n plain tuple.\n callback (`Callable`, *optional*):\n A function that calls every `callback_steps` steps during inference. The function is called with the\n following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.\n callback_steps (`int`, *optional*, defaults to 1):\n The frequency at which the `callback` function is called. If not specified, the callback is called at\n every step.\n\n Examples:\n\n ```py\n >>> import PIL\n >>> import requests\n >>> import torch\n >>> from io import BytesIO\n\n >>> from diffusers import StableDiffusionInstructPix2PixPipeline\n\n\n >>> def download_image(url):\n ... response = requests.get(url)\n ... 
return PIL.Image.open(BytesIO(response.content)).convert(\"RGB\")\n\n\n >>> img_url = \"https://huggingface.co/datasets/diffusers/diffusers-images-docs/resolve/main/mountain.png\"\n\n >>> image = download_image(img_url).resize((512, 512))\n\n >>> pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(\n ... \"timbrooks/instruct-pix2pix\", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to(\"cuda\")\n\n >>> prompt = \"make the mountains snowy\"\n >>> image = pipe(prompt=prompt, image=image).images[0]\n ```\n\n Returns:\n [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:\n If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,\n otherwise a `tuple` is returned where the first element is a list with the generated images and the\n second element is a list of `bool`s indicating whether the corresponding generated image contains\n \"not-safe-for-work\" (nsfw) content.\n \"\"\"\n # 0. Check inputs\n self.check_inputs(prompt, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds)\n\n if image is None:\n raise ValueError(\"`image` input cannot be undefined.\")\n\n # 1. Define call parameters\n if prompt is not None and isinstance(prompt, str):\n batch_size = 1\n elif prompt is not None and isinstance(prompt, list):\n batch_size = len(prompt)\n else:\n batch_size = prompt_embeds.shape[0]\n\n device = self._execution_device\n # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)\n # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`\n # corresponds to doing no classifier free guidance.\n do_classifier_free_guidance = guidance_scale > 1.0 and image_guidance_scale >= 1.0\n # check if scheduler is in sigmas space\n scheduler_is_in_sigma_space = hasattr(self.scheduler, \"sigmas\")\n\n # 2. Encode input prompt\n prompt_embeds = self._encode_prompt(\n prompt,\n device,\n num_images_per_prompt,\n do_classifier_free_guidance,\n negative_prompt,\n prompt_embeds=prompt_embeds,\n negative_prompt_embeds=negative_prompt_embeds,\n )\n\n # 3. Preprocess image\n image = self.image_processor.preprocess(image)\n\n # 4. set timesteps\n self.scheduler.set_timesteps(num_inference_steps, device=device)\n timesteps = self.scheduler.timesteps\n\n # 5. Prepare Image latents\n image_latents = self.prepare_image_latents(\n image,\n batch_size,\n num_images_per_prompt,\n prompt_embeds.dtype,\n device,\n do_classifier_free_guidance,\n generator,\n )\n\n height, width = image_latents.shape[-2:]\n height = height * self.vae_scale_factor\n width = width * self.vae_scale_factor\n\n # 6. Prepare latent variables\n num_channels_latents = self.vae.config.latent_channels\n latents = self.prepare_latents(\n batch_size * num_images_per_prompt,\n num_channels_latents,\n height,\n width,\n prompt_embeds.dtype,\n device,\n generator,\n latents,\n )\n\n # 7. Check that shapes of latents and image match the UNet channels\n num_channels_image = image_latents.shape[1]\n if num_channels_latents + num_channels_image != self.unet.config.in_channels:\n raise ValueError(\n f\"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects\"\n f\" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +\"\n f\" `num_channels_image`: {num_channels_image} \"\n f\" = {num_channels_latents+num_channels_image}. Please verify the config of\"\n \" `pipeline.unet` or your `image` input.\"\n )\n\n # 8. Prepare extra step kwargs. 
TODO: Logic should ideally just be moved out of the pipeline\n extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)\n\n # 9. Denoising loop\n num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order\n with self.progress_bar(total=num_inference_steps) as progress_bar:\n for i, t in enumerate(timesteps):\n # Expand the latents if we are doing classifier free guidance.\n # The latents are expanded 3 times because for pix2pix the guidance\\\n # is applied for both the text and the input image.\n latent_model_input = torch.cat([latents] * 3) if do_classifier_free_guidance else latents\n\n # concat latents, image_latents in the channel dimension\n scaled_latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)\n scaled_latent_model_input = torch.cat([scaled_latent_model_input, image_latents], dim=1)\n\n # predict the noise residual\n noise_pred = self.unet(\n scaled_latent_model_input, t, encoder_hidden_states=prompt_embeds, return_dict=False\n )[0]\n\n # Hack:\n # For karras style schedulers the model does classifer free guidance using the\n # predicted_original_sample instead of the noise_pred. So we need to compute the\n # predicted_original_sample here if we are using a karras style scheduler.\n if scheduler_is_in_sigma_space:\n step_index = (self.scheduler.timesteps == t).nonzero()[0].item()\n sigma = self.scheduler.sigmas[step_index]\n noise_pred = latent_model_input - sigma * noise_pred\n\n # perform guidance\n if do_classifier_free_guidance:\n noise_pred_text, noise_pred_image, noise_pred_uncond = noise_pred.chunk(3)\n noise_pred = (\n noise_pred_uncond\n + guidance_scale * (noise_pred_text - noise_pred_image)\n + image_guidance_scale * (noise_pred_image - noise_pred_uncond)\n )\n if do_classifier_free_guidance and guidance_rescale > 0.0:\n # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf\n # print('Doing guidance rescale!')\n noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)\n\n # Hack:\n # For karras style schedulers the model does classifer free guidance using the\n # predicted_original_sample instead of the noise_pred. But the scheduler.step function\n # expects the noise_pred and computes the predicted_original_sample internally. 
So we\n # need to overwrite the noise_pred here such that the value of the computed\n # predicted_original_sample is correct.\n if scheduler_is_in_sigma_space:\n noise_pred = (noise_pred - latents) / (-sigma)\n\n # compute the previous noisy sample x_t -> x_t-1\n latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]\n\n # call the callback, if provided\n if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):\n progress_bar.update()\n if callback is not None and i % callback_steps == 0:\n callback(i, t, latents)\n\n if not output_type == \"latent\":\n image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]\n image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)\n else:\n image = latents\n has_nsfw_concept = None\n\n if has_nsfw_concept is None:\n do_denormalize = [True] * image.shape[0]\n else:\n do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]\n\n image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)\n\n # Offload last model to CPU\n if hasattr(self, \"final_offload_hook\") and self.final_offload_hook is not None:\n self.final_offload_hook.offload()\n\n if not return_dict:\n return (image, has_nsfw_concept)\n\n return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)" } ]
import argparse
import logging
import math
import os
import os.path as osp
import random
import shutil
import wandb
import numpy as np
import torch
import torch.nn.functional as F
import torchvision.transforms.functional as TF
import torch.utils.checkpoint
import transformers
import diffusers
import copy
import json
import datetime
import matplotlib
import wandb
import xformers
import bitsandbytes as bnb
from pathlib import Path
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import ProjectConfiguration, set_seed
from torch.utils.data import Dataset
from huggingface_hub import create_repo, upload_folder
from packaging import version
from torchvision import transforms
from tqdm.auto import tqdm
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DiffusionPipeline, UNet2DConditionModel, DPMSolverMultistepScheduler
from diffusers.loaders import AttnProcsLayers
from diffusers.models.attention_processor import LoRAAttnProcessor
from diffusers.optimization import get_scheduler
from diffusers.utils import is_wandb_available
from diffusers.utils.import_utils import is_xformers_available
from PIL import Image
from PIL.ImageOps import exif_transpose
from diode.diode import (
    plot_depth_map,
    check_and_tuplize_tokens,
    enumerate_paths,
    _VALID_SPLITS,
    _VALID_SCENE_TYPES
)
from torchvision.transforms.functional import pil_to_tensor
from torchvision.transforms.functional import to_pil_image
from rescale_cfg_pipeline_forward import new_call
6,771
).images[0] image = to_pil_image(pil_to_tensor(image).float().mean(0,keepdims=True).repeat(3,1,1).to(torch.uint8)) val_test_images1.append(image) if test_batches[1] is not None: image = pipeline.new_call( prompt_embeds = pipeline.text_encoder(test_batch2['input_ids'][ii:ii+1])[0], # latents=test_batch2['noises'][ii].unsqueeze(0), image = Image.fromarray(tensor2np(test_batch2['original_pixel_values'][ii])), image_guidance_scale = 1., guidance_scale = 3.0, generator=generator, num_inference_steps = 25, #Xiaodan: according to https://arxiv.org/pdf/2305.08891.pdf guidance_rescale = 0.7, # output_type = 'np', ).images[0] image = to_pil_image(pil_to_tensor(image).float().mean(0,keepdims=True).repeat(3,1,1).to(torch.uint8)) val_test_images2.append(image) val_train_image = pipeline.new_call( prompt_embeds=pipeline.text_encoder(train_batch['input_ids'][ii:ii+1])[0], # latents=train_batch['noises'][ii].unsqueeze(0), image = Image.fromarray(tensor2np(train_batch['original_pixel_values'][ii])), image_guidance_scale = 1., guidance_scale = 3.0, generator = generator, num_inference_steps = 25, #Xiaodan: according to https://arxiv.org/pdf/2305.08891.pdf guidance_rescale = 0.7, # output_type = 'np', ).images[0] val_train_image = to_pil_image(pil_to_tensor(val_train_image).float().mean(0,keepdims=True).repeat(3,1,1).to(torch.uint8)) val_train_images.append(val_train_image) concat_test_images1 = [] concat_test_images2 = [] concat_train_images = [] for gt, im_1, im_2, im_3 in zip(test_batch1['gt_values'],test_batch1['original_pixel_values'],test_batch1['pixel_values'],val_test_images1): output_img = visualization_routine(gt,im_1,im_2,im_3) concat_test_images1.append(output_img) for gt, im_1, im_2, im_3 in zip(test_batch2['gt_values'],test_batch2['original_pixel_values'],test_batch2['pixel_values'],val_test_images2): output_img = visualization_routine(gt,im_1,im_2,im_3) concat_test_images2.append(output_img) for gt, im_1, im_2, im_3 in zip(train_batch['gt_values'],train_batch['original_pixel_values'],train_batch['pixel_values'],val_train_images): output_img = visualization_routine(gt,im_1,im_2,im_3) concat_train_images.append(output_img) for tracker in accelerator.trackers: if tracker.name == "wandb": tracker.log( { "validation: training images": [ wandb.Image(image, ) for i, image in enumerate(concat_train_images) ], }, step=global_step ) tracker.log( { "validation: test images 1": [ wandb.Image(image, ) for i, image in enumerate(concat_test_images1) ], }, step=global_step ) tracker.log( { "validation: test images 2": [ wandb.Image(image, ) for i, image in enumerate(concat_test_images2) ], }, step=global_step ) del pipeline torch.cuda.empty_cache() return class PSEUDODepthDataset(Dataset): def __init__( self, data_root, pseudo_root, tokenizer, splits, scene_types, size=512, center_crop=True, num_train_imgs=None, tokenizer_max_length=None, empty_prompt = False, unified_prompt = None, ): self.data_root = Path(data_root) self.pseudo_root = Path(pseudo_root) self.splits = check_and_tuplize_tokens( splits, _VALID_SPLITS ) self.scene_types = check_and_tuplize_tokens( scene_types, _VALID_SCENE_TYPES ) meta_fname = self.data_root.parent / 'diode_meta.json' with open(meta_fname, 'r') as f: self.meta = json.load(f) self.size = size self.center_crop = center_crop self.tokenizer = tokenizer self.tokenizer_max_length = tokenizer_max_length self.num_train_imgs = num_train_imgs self.empty_prompt = empty_prompt self.unified_prompt = unified_prompt if not self.data_root.exists(): raise ValueError("Instance images root doesn't 
exists.") imgs = [] for split in self.splits: for scene_type in self.scene_types:
# coding=utf-8 # Intrinsic-LoRA """Intrinsic-LoRA AugUNet model for depth training""" #Xiaodan: according to https://arxiv.org/pdf/2305.08891.pdf logger = get_logger(__name__, log_level="INFO") def save_model_card(repo_id: str, images=None, base_model=str, dataset_name=str, repo_folder=None): img_str = "" for i, image in enumerate(images): image.save(os.path.join(repo_folder, f"image_{i}.png")) img_str += f"![img_{i}](./image_{i}.png)\n" yaml = f""" --- license: creativeml-openrail-m base_model: {base_model} tags: - stable-diffusion - stable-diffusion-diffusers - text-to-image - diffusers - lora inference: true --- """ model_card = f""" # LoRA text2image fine-tuning - {repo_id} These are LoRA adaption weights for {base_model}. The weights were fine-tuned on the {dataset_name} dataset. You can find some example images in the following. \n {img_str} """ with open(os.path.join(repo_folder, "README.md"), "w") as f: f.write(yaml + model_card) def colorize( value, cmap='inferno_r', invalid_val=-99, invalid_mask=None, background_color=(128, 128, 128, 255), gamma_corrected=False, value_transform=None, vmin=None, vmax=None, ): if isinstance(value, torch.Tensor): value = value.detach().cpu().numpy() value = value.squeeze() if invalid_mask is None: invalid_mask = value == invalid_val mask = np.logical_not(invalid_mask) else: mask = np.where(invalid_mask==1) invalid_mask = np.where(invalid_mask==0) vmax = np.percentile(value[mask],99) if vmax!=0.: value = value/vmax # vmin..vmax else: value = value * 0. # squeeze last dim if it exists # grey out the invalid values if cmap=='singlechannel': img = 1-np.stack([value]*3,axis=-1) img[invalid_mask] = 0 return Image.fromarray(((img.clip(max=1.))*255.).astype(np.uint8)) else: value[invalid_mask] = np.nan cmapper = matplotlib.cm.get_cmap(cmap) if value_transform: value = value_transform(value) value = cmapper(value, bytes=True) # (nxmx4) img = value[...] 
img[invalid_mask] = background_color if gamma_corrected: # gamma correction img = img / 255 img = np.power(img, 2.2) img = img * 255 img = img.astype(np.uint8) return img def tokenize_prompt(tokenizer, prompt, tokenizer_max_length=None): if tokenizer_max_length is not None: max_length = tokenizer_max_length else: max_length = tokenizer.model_max_length text_inputs = tokenizer( prompt, truncation=True, padding="max_length", max_length=max_length, return_tensors="pt", ) return text_inputs def tensor2np(tensor): return (255*(tensor.cpu().permute(1,2,0).numpy()*0.5+0.5)).astype(np.uint8) def listPILToTensor(listPILs): size = listPILs[0].size[0] image_transforms = transforms.Compose( [ transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR), transforms.CenterCrop(size), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) return torch.stack([image_transforms(p) for p in listPILs]) def visualization_routine(gt,im_1,im_2,im_3): gt = tensor2np(gt) im_1 = tensor2np(im_1) im_2 = tensor2np(im_2) im_3 = np.array(im_3) return Image.fromarray(np.hstack((im_1,gt,im_2,im_3))) @torch.inference_mode() def log_validation( text_encoder, tokenizer, unet, vae, args, accelerator, zero_snr_betas, test_batches, train_batch, weight_dtype, epoch, global_step ): pipeline = DiffusionPipeline.from_pretrained( "timbrooks/instruct-pix2pix", unet=accelerator.unwrap_model(unet), text_encoder=accelerator.unwrap_model(text_encoder), revision=args.revision, torch_dtype=weight_dtype, safety_checker=None, ) scheduler_args = {} if "variance_type" in pipeline.scheduler.config: variance_type = pipeline.scheduler.config.variance_type if variance_type in ["learned", "learned_range"]: variance_type = "fixed_small" scheduler_args["variance_type"] = variance_type pipeline.scheduler = DPMSolverMultistepScheduler.from_config( pipeline.scheduler.config, **scheduler_args, #Xiaodan: according to https://arxiv.org/pdf/2305.08891.pdf prediction_type='v_prediction', trained_betas=zero_snr_betas, ) assert pipeline.scheduler.prediction_type == "v_prediction" assert pipeline.scheduler.alphas_cumprod[-1] == 0. 
pipeline = pipeline.to(accelerator.device) pipeline.set_progress_bar_config(disable=True) pipeline.new_call = new_call.__get__(pipeline, pipeline.__class__) val_test_images1 = [] val_test_images2 = [] val_train_images = [] test_batch1 = test_batches[0] test_batch2 = test_batches[1] for ii in range(4): with torch.no_grad(): if test_batches[0] is not None: generator = torch.Generator(device=accelerator.device).manual_seed(args.seed+100) if args.seed else None image = pipeline.new_call( prompt_embeds = pipeline.text_encoder(test_batch1['input_ids'][ii:ii+1])[0], image = Image.fromarray(tensor2np(test_batch1['original_pixel_values'][ii])), image_guidance_scale = 1., guidance_scale = 3.0, generator=generator, num_inference_steps = 25, #Xiaodan: according to https://arxiv.org/pdf/2305.08891.pdf guidance_rescale = 0.7, # output_type = 'np', ).images[0] image = to_pil_image(pil_to_tensor(image).float().mean(0,keepdims=True).repeat(3,1,1).to(torch.uint8)) val_test_images1.append(image) if test_batches[1] is not None: image = pipeline.new_call( prompt_embeds = pipeline.text_encoder(test_batch2['input_ids'][ii:ii+1])[0], # latents=test_batch2['noises'][ii].unsqueeze(0), image = Image.fromarray(tensor2np(test_batch2['original_pixel_values'][ii])), image_guidance_scale = 1., guidance_scale = 3.0, generator=generator, num_inference_steps = 25, #Xiaodan: according to https://arxiv.org/pdf/2305.08891.pdf guidance_rescale = 0.7, # output_type = 'np', ).images[0] image = to_pil_image(pil_to_tensor(image).float().mean(0,keepdims=True).repeat(3,1,1).to(torch.uint8)) val_test_images2.append(image) val_train_image = pipeline.new_call( prompt_embeds=pipeline.text_encoder(train_batch['input_ids'][ii:ii+1])[0], # latents=train_batch['noises'][ii].unsqueeze(0), image = Image.fromarray(tensor2np(train_batch['original_pixel_values'][ii])), image_guidance_scale = 1., guidance_scale = 3.0, generator = generator, num_inference_steps = 25, #Xiaodan: according to https://arxiv.org/pdf/2305.08891.pdf guidance_rescale = 0.7, # output_type = 'np', ).images[0] val_train_image = to_pil_image(pil_to_tensor(val_train_image).float().mean(0,keepdims=True).repeat(3,1,1).to(torch.uint8)) val_train_images.append(val_train_image) concat_test_images1 = [] concat_test_images2 = [] concat_train_images = [] for gt, im_1, im_2, im_3 in zip(test_batch1['gt_values'],test_batch1['original_pixel_values'],test_batch1['pixel_values'],val_test_images1): output_img = visualization_routine(gt,im_1,im_2,im_3) concat_test_images1.append(output_img) for gt, im_1, im_2, im_3 in zip(test_batch2['gt_values'],test_batch2['original_pixel_values'],test_batch2['pixel_values'],val_test_images2): output_img = visualization_routine(gt,im_1,im_2,im_3) concat_test_images2.append(output_img) for gt, im_1, im_2, im_3 in zip(train_batch['gt_values'],train_batch['original_pixel_values'],train_batch['pixel_values'],val_train_images): output_img = visualization_routine(gt,im_1,im_2,im_3) concat_train_images.append(output_img) for tracker in accelerator.trackers: if tracker.name == "wandb": tracker.log( { "validation: training images": [ wandb.Image(image, ) for i, image in enumerate(concat_train_images) ], }, step=global_step ) tracker.log( { "validation: test images 1": [ wandb.Image(image, ) for i, image in enumerate(concat_test_images1) ], }, step=global_step ) tracker.log( { "validation: test images 2": [ wandb.Image(image, ) for i, image in enumerate(concat_test_images2) ], }, step=global_step ) del pipeline torch.cuda.empty_cache() return class 
PSEUDODepthDataset(Dataset): def __init__( self, data_root, pseudo_root, tokenizer, splits, scene_types, size=512, center_crop=True, num_train_imgs=None, tokenizer_max_length=None, empty_prompt = False, unified_prompt = None, ): self.data_root = Path(data_root) self.pseudo_root = Path(pseudo_root) self.splits = check_and_tuplize_tokens( splits, _VALID_SPLITS ) self.scene_types = check_and_tuplize_tokens( scene_types, _VALID_SCENE_TYPES ) meta_fname = self.data_root.parent / 'diode_meta.json' with open(meta_fname, 'r') as f: self.meta = json.load(f) self.size = size self.center_crop = center_crop self.tokenizer = tokenizer self.tokenizer_max_length = tokenizer_max_length self.num_train_imgs = num_train_imgs self.empty_prompt = empty_prompt self.unified_prompt = unified_prompt if not self.data_root.exists(): raise ValueError("Instance images root doesn't exists.") imgs = [] for split in self.splits: for scene_type in self.scene_types:
_curr = enumerate_paths(self.meta[split][scene_type])
2
2023-12-08 16:34:44+00:00
8k
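The plot_depth_map helper quoted in this record's context converts a raw DIODE depth array and validity mask into the inverted, normalized grayscale target image and mask used by the training script. A brief usage sketch on synthetic data (array shapes and values are made up; the call assumes the diode.diode module from this record is importable):

import numpy as np
from diode.diode import plot_depth_map  # imported the same way in the training script above

# synthetic 64x64 depth map in meters and a validity mask with one invalid corner
dm = np.random.uniform(0.5, 50.0, size=(64, 64))
validity_mask = np.ones((64, 64), dtype=np.uint8)
validity_mask[:16, :16] = 0  # pretend this region has no valid depth

depth_img, mask_img = plot_depth_map(dm, validity_mask)  # both returned as PIL images
depth_img.save('depth_target.png')
mask_img.save('validity_mask.png')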
modelscope/llmuses
llmuses/run_ms.py
[ { "identifier": "DATASET_ID", "path": "llmuses/benchmarks/ceval/ceval_adapter.py", "snippet": "DATASET_ID = 'modelscope/ceval-exam'" }, { "identifier": "DATASET_ID", "path": "llmuses/benchmarks/mmlu/mmlu_adapter.py", "snippet": "DATASET_ID = 'modelscope/mmlu'" }, { "identifier": "DATASET_ID", "path": "llmuses/benchmarks/hellaswag/hellaswag_adapter.py", "snippet": "DATASET_ID = 'modelscope/hellaswag'" }, { "identifier": "DATASET_ID", "path": "llmuses/benchmarks/arc/arc_adapter.py", "snippet": "DATASET_ID = 'modelscope/ai2_arc'" }, { "identifier": "DATASET_ID", "path": "llmuses/benchmarks/truthful_qa/truthful_qa_adapter.py", "snippet": "DATASET_ID = 'modelscope/truthful_qa'" }, { "identifier": "DEFAULT_ROOT_CACHE_DIR", "path": "llmuses/constants.py", "snippet": "DEFAULT_ROOT_CACHE_DIR = '~/.cache/llmuses'" }, { "identifier": "Evaluator", "path": "llmuses/evaluator/evaluator.py", "snippet": "class Evaluator(object):\n\n \"\"\"\n The evaluator for model on datasets.\n \"\"\"\n\n def __init__(self,\n dataset_name_or_path: str,\n data_adapter: DataAdapter,\n subset_list: Optional[list] = None,\n model_adapter: Optional[BaseModelAdapter] = None,\n use_cache: bool = True,\n mem_cache_method: str = 'ttl',\n root_cache_dir: Optional[str] = DEFAULT_ROOT_CACHE_DIR,\n outputs_dir: Optional[str] = '',\n is_custom_outputs_dir: bool = False,\n datasets_dir: Optional[str] = DEFAULT_ROOT_CACHE_DIR,\n stage: Optional[str] = 'all',\n **kwargs):\n\n self.dataset_name_or_path = dataset_name_or_path\n self.root_cache_dir = os.path.expanduser(root_cache_dir)\n self.datasets_dir = os.path.expanduser(datasets_dir)\n self.kwargs = kwargs\n self.data_adapter = data_adapter\n self.model_adapter = model_adapter\n\n self.model_cfg = self.model_adapter.model_cfg\n self.model_id = self.model_cfg['model_id']\n self.model_revision = self.model_cfg.get('revision', None)\n self.model_revision_str = self.model_revision if self.model_revision is not None else 'none'\n\n # Get default outputs_dir\n if not is_custom_outputs_dir:\n outputs_dir = make_outputs_dir(work_dir=outputs_dir,\n model_id=self.model_id,\n model_revision=self.model_revision_str)\n\n self.outputs_dir = os.path.expanduser(outputs_dir)\n\n # Deal with the output paths\n self.outputs_structure = make_outputs_structure(self.outputs_dir)\n\n # Load dataset\n self.dataset = self.data_adapter.load(dataset_name_or_path=dataset_name_or_path,\n subset_list=subset_list,\n work_dir=self.datasets_dir,\n **kwargs)\n\n # Get prompts from dataset\n self.prompts = self.data_adapter.gen_prompts(data_dict=self.dataset)\n del self.dataset\n\n # Init memory cache\n # TODO: refactor mem cache manager\n mem_cache_file_name = self.dataset_name_or_path.replace('/', '_') + \\\n '_' + self.model_id.replace('/', '_') + \\\n '_' + self.model_revision_str + \\\n '_cache.pkl'\n self.mem_cache_path = os.path.join(self.root_cache_dir, 'mem_cache', mem_cache_file_name)\n self.use_cache = use_cache\n self.mem_cache_method = mem_cache_method\n self.mem_cache = None\n if self.use_cache:\n self.mem_cache = init_mem_cache(method=self.mem_cache_method, cache_file_path=self.mem_cache_path)\n logger.info(f'** Using memory cache with size: {len(self.mem_cache)}')\n\n def _pred_answer(self,\n input_d: dict,\n infer_cfg: dict,\n subset_name: str,\n answer_id: str = None) -> dict:\n\n # Get answer from memory cache\n if self.mem_cache is not None:\n if answer_id in self.mem_cache:\n logger.info(f'** Reusing answer `{answer_id}` in memory cache.')\n return self.mem_cache[answer_id]\n\n ans: dict = 
self.model_adapter.predict(inputs=input_d, infer_cfg=infer_cfg)\n ans[AnswerKeys.ANSWER_ID] = answer_id\n ans[AnswerKeys.SUBSET_NAME] = subset_name\n\n if self.mem_cache is not None:\n self.mem_cache[answer_id] = ans\n\n return ans\n\n def get_answers(self,\n subset_name: str,\n prompts_list: List[dict],\n infer_cfg: dict = None,\n debug: bool = False,\n **kwargs) -> list:\n \"\"\"\n Get answers from model inference.\n It is required to rewrite this method to support your own evaluator.\n\n Args:\n subset_name: subset name for benchmark.\n prompts_list: prompts list.\n infer_cfg: model inference config.\n Attributes:\n do_sample: bool, whether to use sampling.\n top_k: int, the number of highest probability vocabulary tokens to keep for top-k-filtering.\n top_p: float, if set to float < 1, only the most probable tokens with probabilities to add.\n temperature: float, the value used to module the next token probabilities.\n num_beams: int, number of beams for beam search. 1 means no beam search.\n max_length: int, the max length of the sequence to be generated.\n max_new_tokens: int, the max number of new tokens to be generated.\n repetition_penalty: float, the parameter for repetition penalty. 1.0 means no penalty.\n debug: whether to run in debug mode.\n **kwargs: kwargs.\n\n Returns: The list of answers.\n \"\"\"\n assert self.data_adapter is not None, 'data_adapter must be provided when calling func get_answers() !'\n assert self.model_adapter is not None, 'model must be provided when calling func get_answers() !'\n\n answers_list = []\n for input_prompt in tqdm(prompts_list, total=len(prompts_list), desc=f'Predicting({subset_name}): '):\n\n # Gen answer_id (concat: model_cfg + input_prompt + infer_cfg)\n model_cfg_str = json.dumps(\n OrderedDict(sorted(dict_torch_dtype_to_str(self.model_adapter.model_cfg).items())),\n ensure_ascii=False)\n input_prompt_str = json.dumps(OrderedDict(sorted(dict_torch_dtype_to_str(input_prompt).items())),\n ensure_ascii=False)\n infer_cfg_str = json.dumps(OrderedDict(sorted(dict_torch_dtype_to_str(infer_cfg).items())),\n ensure_ascii=False)\n answer_id = 'answer-' + gen_hash(model_cfg_str + input_prompt_str + infer_cfg_str)\n\n # Get answers\n answer_d: dict = self._pred_answer(input_d=input_prompt,\n infer_cfg=infer_cfg,\n subset_name=subset_name,\n answer_id=answer_id)\n\n answer_d[AnswerKeys.MODEL_SPEC] = self.model_adapter.model_cfg\n answer_d[AnswerKeys.RAW_INPUT] = input_prompt[AnswerKeys.RAW_INPUT]\n answer_d[AnswerKeys.ORIGIN_PROMPT] = input_prompt\n\n if debug:\n logger.debug(f'**input_prompt: {json.dumps(input_prompt, ensure_ascii=False)} \\n')\n logger.debug(f'**predicted ans: {json.dumps(answer_d, ensure_ascii=False)} \\n')\n\n answers_list.append(answer_d)\n\n # Dump answers\n pred_dir: str = self.outputs_structure.get(OutputsStructure.PREDICTIONS_DIR)\n pred_file_name: str = self.dataset_name_or_path.replace('/', '_') + '_' + subset_name + '.jsonl'\n os.makedirs(pred_dir, exist_ok=True)\n dump_jsonl_data(answers_list, os.path.join(pred_dir, pred_file_name))\n\n return answers_list\n\n def _get_review(self,\n answer_d: dict,\n review_id: str = None,\n reviewer_spec: dict = None) -> dict:\n\n # Get review from memory cache\n if self.mem_cache is not None:\n if review_id in self.mem_cache:\n logger.info(f'** Reusing review `{review_id}` in memory cache.')\n return self.mem_cache[review_id]\n\n if reviewer_spec is None:\n reviewer_spec = {}\n\n review_res = deepcopy(answer_d)\n choices = review_res[AnswerKeys.CHOICES]\n if len(choices) == 0:\n 
review_res[ReviewKeys.REVIEWED] = False\n review_res[ReviewKeys.REVIEW_ID] = None\n review_res[ReviewKeys.REVIEWER_SPEC] = reviewer_spec\n review_res[ReviewKeys.REVIEW_TIME] = time.time()\n return review_res\n\n rev_choices = []\n for choice in choices:\n raw_input_d: dict = review_res[AnswerKeys.RAW_INPUT]\n answer_content = choice[ReviewKeys.MESSAGE][ReviewKeys.CONTENT]\n answer_content = self.data_adapter.parse_pred_result(answer_content, raw_input_d)\n gold_content = self.data_adapter.get_gold_answer(raw_input_d)\n\n review_result = self.data_adapter.match(gold_content, answer_content)\n choice[ReviewKeys.REVIEW] = {ReviewKeys.GOLD: gold_content,\n ReviewKeys.PRED: answer_content,\n ReviewKeys.RESULT: review_result}\n\n rev_choices.append(choice)\n\n review_res[AnswerKeys.CHOICES] = rev_choices\n review_res[ReviewKeys.REVIEWED] = True\n review_res[ReviewKeys.REVIEW_ID] = review_id\n review_res[ReviewKeys.REVIEWER_SPEC] = reviewer_spec\n review_res[ReviewKeys.REVIEW_TIME] = time.time()\n\n if self.mem_cache is not None:\n self.mem_cache[review_id] = review_res\n\n return review_res\n\n def get_reviews(self, subset_name: str, answers_list: List[dict], debug: bool = False, **kwargs) -> list:\n \"\"\"\n Get reviews from answers.\n It is required to rewrite this method to support your own evaluator.\n\n Args:\n subset_name: subset name of benchmark\n answers_list: inference results list.\n debug: whether to run in debug mode.\n **kwargs: kwargs.\n\n Returns: reviews list.\n \"\"\"\n reviews_list = []\n for answer_d in tqdm(answers_list, total=len(answers_list), desc=f'Reviewing({subset_name}): '):\n\n # Gen review_id (concat: answer_id + reviewer_spec)\n answer_id = answer_d[AnswerKeys.ANSWER_ID]\n\n reviewer_spec: dict = {'metric': [metric_d['name'] for metric_d in self.data_adapter.metric_list],\n 'reviewer': ['Evaluator'],\n 'revision': ['default']}\n reviewer_spec_str = json.dumps(OrderedDict(sorted(dict_torch_dtype_to_str(reviewer_spec).items())),\n ensure_ascii=False)\n review_id = 'review-' + gen_hash(answer_id + reviewer_spec_str)\n\n # Get review\n review_d = self._get_review(answer_d=answer_d, review_id=review_id, reviewer_spec=reviewer_spec)\n\n if debug:\n logger.debug(review_d)\n\n reviews_list.append(review_d)\n\n # Dump reviews\n review_dir: str = self.outputs_structure.get(OutputsStructure.REVIEWS_DIR)\n review_file_name: str = self.dataset_name_or_path.replace('/', '_') + '_' + subset_name + '.jsonl'\n os.makedirs(review_dir, exist_ok=True)\n dump_jsonl_data(reviews_list, os.path.join(review_dir, review_file_name))\n\n return reviews_list\n\n def compute_metrics(self, reviews_list: List[dict]) -> Any:\n \"\"\"\n To compute metrics from reviews_list for each subset.\n It is required to rewrite this method to support your own evaluator.\n\n Args:\n reviews_list: reviews list.\n\n Returns:\n The metric result. 
Depends on the metric function in data_adapter.\n \"\"\"\n\n review_res_list = []\n for review_d in reviews_list:\n if not review_d[ReviewKeys.REVIEWED]:\n logger.warning(f'** Review not finished for answer_id: {review_d[AnswerKeys.ANSWER_ID]}')\n continue\n\n review_res = review_d[AnswerKeys.CHOICES][0][ReviewKeys.REVIEW][ReviewKeys.RESULT]\n review_res_list.append(review_res)\n\n metric_score: Union[float, dict] = self.data_adapter.compute_metric(review_res_list=review_res_list)\n\n return metric_score\n\n def dump_report(self, report_map: dict, use_table: bool = True):\n \"\"\"\n Get report for total reviews of specific dataset.\n It is required to rewrite this method to support your own evaluator.\n\n Args:\n report_map: report dict. Generated by func self.data_adapter.gen_report().\n use_table: whether to generate table for reports. Default to True.\n\n Returns: None\n \"\"\"\n\n # Dump report\n report_dir: str = self.outputs_structure[OutputsStructure.REPORTS_DIR]\n report_file_name: str = self.dataset_name_or_path.replace('/', '_') + '.json'\n os.makedirs(report_dir, exist_ok=True)\n report_path: str = os.path.join(report_dir, report_file_name)\n with open(report_path, 'w') as f:\n f.write(json.dumps(report_map, ensure_ascii=False, indent=4))\n # logger.info(f'** Dump report to {report_path} \\n')\n logger.info(f'** Dump report: {report_file_name} \\n')\n\n if use_table:\n try:\n # Make table\n report_table: str = gen_table([report_dir])\n logger.info(f'** Report table: \\n {report_table} \\n')\n except:\n logger.error('Failed to generate report table.')\n\n def save_cache(self):\n if self.mem_cache is not None:\n logger.info(f'** Saving memory cache with size: {len(self.mem_cache)}')\n Cache.save(cache=self.mem_cache, path=self.mem_cache_path)\n\n def clear_cache(self):\n \"\"\"\n Clear memory cache.\n\n Returns: None\n \"\"\"\n if self.mem_cache is not None:\n cache_len = len(self.mem_cache)\n self.mem_cache.clear()\n logger.info(f'** Memory cache cleared, length changed: {cache_len} -> {len(self.mem_cache)}')\n\n def eval(self,\n infer_cfg: dict = None,\n debug: bool = False,\n **kwargs):\n \"\"\"\n Evaluate the model on the specific benchmark. Streaming & parallel mode is supported.\n It is required to rewrite this method to support your own evaluator.\n\n The evaluation process is as follows:\n 1. Get the input samples from the dataset (benchmarks on the ModelScope or HuggingFace).\n 2. Get the input prompts from dataset with specific data adapter.\n 3. Get answers with model inference.\n 4. Get reviews with metric function (or reviewers).\n 5. Generate report from review results.\n\n Args:\n infer_cfg: The config for model inference.\n debug: Whether to run in debug mode. 
Default: False.\n\n Returns:\n None.\n \"\"\"\n\n logger.info(f'**** Start evaluating on dataset {self.dataset_name_or_path} ****')\n\n reviews_map_all = {} # {subset_name: (score, num)}\n for subset_name, prompts_list in self.prompts.items():\n limit = infer_cfg.get('limit', len(prompts_list))\n prompts_list = prompts_list[:limit]\n\n answers_list: list = self.get_answers(subset_name=subset_name,\n prompts_list=prompts_list,\n infer_cfg=infer_cfg,\n debug=debug,\n **kwargs)\n\n reviews_list: list = self.get_reviews(subset_name=subset_name,\n answers_list=answers_list,\n debug=debug,\n **kwargs)\n\n metric_res = self.compute_metrics(reviews_list=reviews_list)\n reviews_map_all[subset_name] = (metric_res, len(reviews_list))\n\n # Generate report\n report_map: dict = self.data_adapter.gen_report(subset_score_map=reviews_map_all)\n self.dump_report(report_map=report_map)\n\n self.save_cache()\n self.clear_cache()\n\n logger.info(f'\\n**** Evaluation finished on {self.dataset_name_or_path} ****\\n')" }, { "identifier": "MultiChoiceModelAdapter", "path": "llmuses/models/model_adapter.py", "snippet": "class MultiChoiceModelAdapter(BaseModelAdapter):\n \"\"\" The multi-choice model adapter. \"\"\"\n\n _DEFAULT_MAX_LENGTH = 2048\n\n def __init__(self,\n model_id: str,\n device_map: str = 'auto',\n torch_dtype: dtype = torch.bfloat16,\n model_revision: str = None,\n max_length: int = None,\n **kwargs):\n \"\"\"\n Args:\n model_id: The model id on ModelScope, or local model_dir. TODO: torch.nn.module to be supported.\n device_map: The device map for model inference.\n torch_dtype: The torch dtype for model inference. Default: torch.bfloat16.\n model_revision: The model revision on ModelScope. Default: None.\n max_length: The max length of input sequence. Default: None.\n **kwargs: Other args.\n \"\"\"\n\n self.model_id: str = model_id\n self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n torch_dtype = torch_dtype if torch_dtype is not None else 'auto'\n\n model_cfg: dict = dict()\n model_cfg['model_id'] = model_id\n model_cfg['device_map'] = device_map\n model_cfg['torch_dtype'] = str(torch_dtype)\n\n from modelscope.utils.hf_util import AutoModelForCausalLM, AutoTokenizer\n\n tokenizer = AutoTokenizer.from_pretrained(self.model_id,\n revision=model_revision,\n trust_remote_code=True,)\n\n model = AutoModelForCausalLM.from_pretrained(self.model_id,\n revision=model_revision,\n device_map=device_map,\n trust_remote_code=True,\n torch_dtype=torch_dtype,)\n\n # model.generation_config = GenerationConfig.from_pretrained(model_id, trust_remote_code=True)\n\n super().__init__(model=model, tokenizer=tokenizer, model_cfg=model_cfg)\n\n self._max_length = max_length\n\n @property\n def max_length(self):\n if self._max_length:\n return self._max_length\n seqlen_config_attrs = ('n_positions', 'max_position_embeddings', 'n_ctx')\n for attr in seqlen_config_attrs:\n if hasattr(self.model.config, attr):\n return getattr(self.model.config, attr)\n if hasattr(self.tokenizer, 'model_max_length'):\n if self.tokenizer.model_max_length == 1000000000000000019884624838656:\n return self._DEFAULT_MAX_LENGTH\n return self.tokenizer.model_max_length\n return self._DEFAULT_MAX_LENGTH\n\n @torch.no_grad()\n def predict(self, inputs: dict, infer_cfg: dict = None) -> dict:\n \"\"\"\n Multi-choice model prediction func.\n\n Args:\n inputs (dict): The inputs for a doc. 
Format:\n {'data': [full_prompt], 'multi_choices': ['A', 'B', 'C', 'D']}\n\n infer_cfg (dict): inference configuration.\n\n Returns:\n res (dict): The model prediction results. Format:\n {\n 'choices': [\n {\n 'index': 0,\n 'message': {\n 'content': [-14.9609, -13.6015, ...], # loglikelihood values for inputs context-continuation pairs.\n 'role': 'assistant'\n }\n }\n ],\n 'created': 1677664795,\n # For models on the ModelScope or HuggingFace, concat model_id and revision with \"-\".\n 'model': 'gpt-3.5-turbo-0613',\n 'object': 'chat.completion',\n 'usage': {\n 'completion_tokens': 17,\n 'prompt_tokens': 57,\n 'total_tokens': 74\n }\n }\n \"\"\"\n\n # TODO: unused\n if infer_cfg is None:\n infer_cfg = {'do_sample': True, 'max_length': 1024}\n\n input_data = inputs['data']\n multi_choices = inputs['multi_choices']\n\n output, input_info = self._get_logits(self.tokenizer, self.model, input_data)\n assert output.shape[0] == 1\n logits = output.flatten()\n\n choice_logits = [logits[self.tokenizer(ch)['input_ids'][-1:]] for ch in multi_choices]\n softval = torch.nn.functional.softmax(torch.tensor(choice_logits).float(), dim=0)\n\n if softval.dtype in {torch.bfloat16, torch.float16}:\n softval = softval.to(dtype=torch.float32)\n probs = softval.detach().cpu().numpy()\n pred: str = multi_choices[int(np.argmax(probs))] # Format: A or B or C or D\n\n res_d = {\n 'choices': [\n {\n 'index': 0,\n 'message': {\n 'content': pred,\n 'role': 'assistant'\n }\n }\n ],\n 'created': time.time(),\n 'model': self.model_id,\n 'object': 'chat.completion',\n 'usage': {}\n }\n\n return res_d\n\n @staticmethod\n def _get_logits(tokenizer, model, inputs: List[str]):\n input_ids = tokenizer(inputs, padding=False)['input_ids']\n input_ids = torch.tensor(input_ids, device=model.device)\n tokens = {'input_ids': input_ids}\n\n outputs = model(input_ids)['logits']\n logits = outputs[:, -1, :]\n log_probs = torch.nn.functional.softmax(logits, dim=-1)\n return log_probs, {'tokens': tokens}" }, { "identifier": "ContinuationLogitsModelAdapter", "path": "llmuses/models/model_adapter.py", "snippet": "class ContinuationLogitsModelAdapter(MultiChoiceModelAdapter):\n\n def __init__(self,\n model_id: str,\n device_map: str = 'auto',\n torch_dtype: dtype = torch.bfloat16,\n model_revision: str = None,\n **kwargs):\n \"\"\"\n Continuation-logits model adapter.\n\n Args:\n model_id: The model id on ModelScope, or local model_dir.\n device_map: The device map for model inference.\n torch_dtype: The torch dtype for model inference. Default: torch.bfloat16.\n model_revision: The model revision on ModelScope. Default: None.\n **kwargs: Other args.\n \"\"\"\n\n super().__init__(model_id=model_id,\n device_map=device_map,\n torch_dtype=torch_dtype,\n model_revision=model_revision,\n **kwargs)\n\n @torch.no_grad()\n def predict(self, inputs: dict, infer_cfg: dict = None) -> dict:\n \"\"\"\n Multi-choice model prediction func.\n Args:\n inputs (dict): The inputs for a doc. Format:\n {'data': [(context, continuation), ...]}\n infer_cfg (dict): inference configuration.\n Returns:\n res (dict): The model prediction results. 
Format:\n {\n 'choices': [\n {\n 'index': 0,\n 'message': {\n 'content': [-14.9609, -13.6015, ...], # loglikelihood values for inputs context-continuation pairs.\n 'role': 'assistant'\n }\n }\n ],\n 'created': 1677664795,\n # For models on the ModelScope or HuggingFace, concat model_id and revision with \"-\".\n 'model': 'gpt-3.5-turbo-0613',\n 'object': 'chat.completion',\n 'usage': {\n 'completion_tokens': 17,\n 'prompt_tokens': 57,\n 'total_tokens': 74\n }\n }\n \"\"\"\n if infer_cfg is None:\n infer_cfg = {'do_sample': True, 'max_length': 2048}\n\n pred_list: list = self.loglikelihood(inputs=inputs['data'], infer_cfg=infer_cfg)\n\n res_d = {\n 'choices': [\n {\n 'index': 0,\n 'message': {\n 'content': pred_list,\n 'role': 'assistant'\n }\n }\n ],\n 'created': time.time(),\n 'model': self.model_id,\n 'object': 'chat.completion',\n 'usage': {}\n }\n return res_d\n\n def loglikelihood(self, inputs: list, infer_cfg: dict = None) -> list:\n # To predict one doc\n doc_ele_pred = []\n for ctx, continuation in inputs:\n\n # ctx_enc shape: [context_tok_len] cont_enc shape: [continuation_tok_len]\n ctx_enc, cont_enc = self._encode_pair(ctx, continuation)\n\n inputs_tokens = torch.tensor(\n (ctx_enc.tolist() + cont_enc.tolist())[-(self.max_length + 1):][:-1],\n dtype=torch.long,\n device=self.model.device).unsqueeze(0)\n\n logits = self.model(inputs_tokens)[0]\n logits = torch.nn.functional.log_softmax(logits.float(), dim=-1)\n\n logits = logits[:, -len(cont_enc):, :]\n cont_enc = cont_enc.unsqueeze(0).unsqueeze(-1)\n logits = torch.gather(logits.cpu(), 2, cont_enc.cpu()).squeeze(-1)\n\n choice_score = float(logits.sum())\n doc_ele_pred.append(choice_score)\n\n # e.g. [-2.3, -9.2, -12.9, 1.1], length=len(choices)\n return doc_ele_pred\n\n def _encode_pair(self, context, continuation):\n n_spaces = len(context) - len(context.rstrip())\n if n_spaces > 0:\n continuation = context[-n_spaces:] + continuation\n context = context[:-n_spaces]\n\n whole_enc = self.tokenizer(context + continuation, padding=False)['input_ids']\n whole_enc = torch.tensor(whole_enc, device=self.device)\n\n context_enc = self.tokenizer(context, padding=False)['input_ids']\n context_enc = torch.tensor(context_enc, device=self.device)\n\n context_enc_len = len(context_enc)\n continuation_enc = whole_enc[context_enc_len:]\n\n return context_enc, continuation_enc" }, { "identifier": "get_logger", "path": "llmuses/utils/logger.py", "snippet": "def get_logger(log_file: Optional[str] = None,\n log_level: int = logging.INFO,\n file_mode: str = 'w'):\n \"\"\" Get logging logger\n\n Args:\n log_file: Log filename, if specified, file handler will be added to\n logger\n log_level: Logging level.\n file_mode: Specifies the mode to open the file, if filename is\n specified (if filemode is unspecified, it defaults to 'w').\n \"\"\"\n\n logger_name = __name__.split('.')[0]\n logger = logging.getLogger(logger_name)\n\n if logger_name in init_loggers:\n add_file_handler_if_needed(logger, log_file, file_mode, log_level)\n return logger\n\n for handler in logger.root.handlers:\n if type(handler) is logging.StreamHandler:\n handler.setLevel(logging.ERROR)\n\n stream_handler = logging.StreamHandler()\n handlers = [stream_handler]\n\n if log_file is not None:\n file_handler = logging.FileHandler(log_file, file_mode)\n handlers.append(file_handler)\n\n for handler in handlers:\n handler.setFormatter(formatter)\n handler.setLevel(log_level)\n logger.addHandler(handler)\n\n logger.setLevel(log_level)\n\n init_loggers[logger_name] = True\n\n return 
logger" } ]
import argparse
import torch
from llmuses.benchmarks.ceval import DATASET_ID as CEVAL_EXAM
from llmuses.benchmarks.mmlu import DATASET_ID as MMLU
from llmuses.benchmarks.hellaswag import DATASET_ID as HELLA_SWAG
from llmuses.benchmarks.arc import DATASET_ID as ARC
from llmuses.benchmarks.truthful_qa import DATASET_ID as TRUTHFUL_QA
from llmuses.constants import DEFAULT_ROOT_CACHE_DIR
from llmuses.evaluator import Evaluator
from llmuses.models.model_adapter import MultiChoiceModelAdapter, ContinuationLogitsModelAdapter
from llmuses.utils.logger import get_logger
from llmuses.models.dummy_chat_model import DummyChatModel
from llmuses.benchmarks.ceval import CEVALAdapter
from llmuses.benchmarks.mmlu import MMLUAdapter
from llmuses.benchmarks.arc import ARCAdapter
from llmuses.benchmarks.hellaswag import HellaSwagAdapter
from llmuses.benchmarks.truthful_qa import TruthfulQaAdapter
6,987
# Copyright (c) Alibaba, Inc. and its affiliates. # flake8: noqa logger = get_logger() # TODO: add more precision MODEL_PRECISION_MAP = {'fp16': torch.float16, 'fp32': torch.float32, 'bf16': torch.bfloat16} """ Run evaluation process for ModelScope Leaderboard. """ def parse_args(): parser = argparse.ArgumentParser(description='Run evaluation on a model') parser.add_argument('--model', help='Model id from modelscope or huggingface.', required=True) parser.add_argument('--revision', help='Model revision.', required=False, default=None) parser.add_argument('--precision', help='Model precision.', default='bf16') parser.add_argument('--work-dir', help='root work cache dir.', default=None) parser.add_argument('--outputs-dir', help='Outputs dir.', default='outputs')
# Copyright (c) Alibaba, Inc. and its affiliates. # flake8: noqa logger = get_logger() # TODO: add more precision MODEL_PRECISION_MAP = {'fp16': torch.float16, 'fp32': torch.float32, 'bf16': torch.bfloat16} """ Run evaluation process for ModelScope Leaderboard. """ def parse_args(): parser = argparse.ArgumentParser(description='Run evaluation on a model') parser.add_argument('--model', help='Model id from modelscope or huggingface.', required=True) parser.add_argument('--revision', help='Model revision.', required=False, default=None) parser.add_argument('--precision', help='Model precision.', default='bf16') parser.add_argument('--work-dir', help='root work cache dir.', default=None) parser.add_argument('--outputs-dir', help='Outputs dir.', default='outputs')
parser.add_argument('--datasets-dir', help='Datasets dir.', default=DEFAULT_ROOT_CACHE_DIR)
5
2023-12-07 06:10:49+00:00
8k
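The MultiChoiceModelAdapter snippet in this record's context selects an answer by softmaxing the final-position logits gathered at each choice letter's token id and taking the argmax. A minimal sketch of that selection step with made-up logit values (the tensor contents are hypothetical; the softmax-and-argmax logic mirrors the snippet):

import numpy as np
import torch

multi_choices = ['A', 'B', 'C', 'D']
# pretend these are the last-position logits gathered at each choice's token id
choice_logits = torch.tensor([-1.2, 0.7, -0.3, -2.5])

softval = torch.nn.functional.softmax(choice_logits.float(), dim=0)
probs = softval.detach().cpu().numpy()
pred = multi_choices[int(np.argmax(probs))]  # 'B' for the values above
print(pred, probs.round(3))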
AsuradaYuci/TF-CLIP
processor/processor_clipreid_stage2.py
[ { "identifier": "AverageMeter", "path": "utils/meter.py", "snippet": "class AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count" }, { "identifier": "R1_mAP_eval", "path": "utils/metrics.py", "snippet": "class R1_mAP_eval():\n def __init__(self, num_query, max_rank=50, feat_norm=True, reranking=False):\n super(R1_mAP_eval, self).__init__()\n self.num_query = num_query\n self.max_rank = max_rank\n self.feat_norm = feat_norm\n self.reranking = reranking\n\n def reset(self):\n self.feats = []\n self.pids = []\n self.camids = []\n\n def update(self, output): # called once for each batch\n feat, pid, camid = output\n self.feats.append(feat.cpu())\n self.pids.extend(np.asarray(pid))\n self.camids.extend(np.asarray(camid))\n\n def compute(self): # called after each epoch\n feats = torch.cat(self.feats, dim=0)\n if self.feat_norm:\n print(\"The test feature is normalized\")\n feats = torch.nn.functional.normalize(feats, dim=1, p=2) # along channel\n # query\n qf = feats[:self.num_query]\n q_pids = np.asarray(self.pids[:self.num_query])\n q_camids = np.asarray(self.camids[:self.num_query])\n # gallery\n gf = feats[0:]\n g_pids = np.asarray(self.pids[0:])\n\n g_camids = np.asarray(self.camids[0:])\n if self.reranking:\n print('=> Enter reranking')\n # distmat = re_ranking(qf, gf, k1=20, k2=6, lambda_value=0.3)\n distmat = re_ranking(qf, gf, k1=50, k2=15, lambda_value=0.3)\n\n else:\n print('=> Computing DistMat with euclidean_distance')\n distmat = euclidean_distance(qf, gf)\n cmc, mAP = eval_func(distmat, q_pids, g_pids, q_camids, g_camids)\n\n return cmc, mAP, distmat, self.pids, self.camids, qf, gf" }, { "identifier": "save_checkpoint", "path": "utils/iotools.py", "snippet": "def save_checkpoint(state, is_best, fpath='checkpoint.pth.tar'):\n mkdir_if_missing(osp.dirname(fpath))\n torch.save(state, fpath)\n if is_best:\n shutil.copy(fpath, osp.join(osp.dirname(fpath), 'best_model.pth.tar'))" }, { "identifier": "SupConLoss", "path": "loss/supcontrast.py", "snippet": "class SupConLoss(nn.Module):\n def __init__(self, device):\n super(SupConLoss, self).__init__()\n self.device = device\n self.temperature = 1.0\n def forward(self, text_features, image_features, t_label, i_targets): \n batch_size = text_features.shape[0] \n batch_size_N = image_features.shape[0] \n mask = torch.eq(t_label.unsqueeze(1).expand(batch_size, batch_size_N), \\\n i_targets.unsqueeze(0).expand(batch_size,batch_size_N)).float().to(self.device) \n\n logits = torch.div(torch.matmul(text_features, image_features.T),self.temperature)\n # print(logits.size())\n # for numerical stability\n logits_max, _ = torch.max(logits, dim=1, keepdim=True)\n logits = logits - logits_max.detach() \n exp_logits = torch.exp(logits) \n log_prob = logits - torch.log(exp_logits.sum(1, keepdim=True)) \n mean_log_prob_pos = (mask * log_prob).sum(1) / mask.sum(1) \n loss = - mean_log_prob_pos.mean()\n\n return loss" }, { "identifier": "CrossEntropyLabelSmooth", "path": "loss/softmax_loss.py", "snippet": "class CrossEntropyLabelSmooth(nn.Module):\n \"\"\"Cross entropy loss with label smoothing regularizer.\n\n Reference:\n Szegedy et al. Rethinking the Inception Architecture for Computer Vision. 
CVPR 2016.\n Equation: y = (1 - epsilon) * y + epsilon / K.\n\n Args:\n num_classes (int): number of classes.\n epsilon (float): weight.\n \"\"\"\n\n def __init__(self, num_classes, epsilon=0.1, use_gpu=True):\n super(CrossEntropyLabelSmooth, self).__init__()\n self.num_classes = num_classes\n self.epsilon = epsilon\n self.use_gpu = use_gpu\n self.logsoftmax = nn.LogSoftmax(dim=1)\n\n def forward(self, inputs, targets):\n \"\"\"\n Args:\n inputs: prediction matrix (before softmax) with shape (batch_size, num_classes)\n targets: ground truth labels with shape (num_classes)\n \"\"\"\n log_probs = self.logsoftmax(inputs) \n targets = torch.zeros(log_probs.size()).scatter_(1, targets.unsqueeze(1).data.cpu(), 1) \n if self.use_gpu: targets = targets.cuda()\n targets = (1 - self.epsilon) * targets + self.epsilon / self.num_classes\n loss = (- targets * log_probs).mean(0).sum()\n return loss" } ]
import logging
import os
import time
import torch
import torch.nn as nn
import torch.distributed as dist
import collections
import time
from utils.meter import AverageMeter
from utils.metrics import R1_mAP_eval
from utils.iotools import save_checkpoint
from torch.cuda import amp
from torch.nn import functional as F
from loss.supcontrast import SupConLoss
from loss.softmax_loss import CrossEntropyLabelSmooth
from datetime import timedelta
3,633
optimizer_center.zero_grad() img = img.to(device) target = vid.to(device) if cfg.MODEL.SIE_CAMERA: target_cam = target_cam.to(device) else: target_cam = None if cfg.MODEL.SIE_VIEW: target_view = target_view.to(device) else: target_view = None with amp.autocast(enabled=True): B, T, C, H, W = img.shape # B=64, T=4.C=3 H=256,W=128 score, feat, logits1 = model(x = img, cam_label=target_cam, view_label=target_view, text_features2=cluster_features) score1 = score[0:3] score2 = score[3] if (n_iter + 1) % log_period == 0: loss1 = loss_fn(score1, feat, target, target_cam, logits1, isprint=True) else: loss1 = loss_fn(score1, feat, target, target_cam, logits1) targetX = target.unsqueeze(1) # 12,1 => [94 94 10 10 15 15 16 16 75 75 39 39] targetX = targetX.expand(B, T) # 12,8 => [ [94...94][94...94][10...10][10...10] ... [39...39] [39...39]] targetX = targetX.contiguous() targetX = targetX.view(B * T, -1) # 96 => [94...94 10...10 15...15 16...16 75...75 39...39] targetX = targetX.squeeze(1) loss_frame = xent_frame(score2, targetX) loss = loss1 + loss_frame / T scaler.scale(loss).backward() scaler.step(optimizer) scaler.update() if 'center' in cfg.MODEL.METRIC_LOSS_TYPE: for param in center_criterion.parameters(): param.grad.data *= (1. / cfg.SOLVER.CENTER_LOSS_WEIGHT) scaler.step(optimizer_center) scaler.update() acc1 = (logits1.max(1)[1] == target).float().mean() acc_id1 = (score[0].max(1)[1] == target).float().mean() acc_id2 = (score[3].max(1)[1] == targetX).float().mean() loss_meter.update(loss.item(), img.shape[0]) acc_meter.update(acc1, 1) acc_meter_id1.update(acc_id1, 1) acc_meter_id2.update(acc_id2, 1) torch.cuda.synchronize() if (n_iter + 1) % log_period == 0: logger.info( "Epoch[{}] Iteration[{}/{}] Loss: {:.3f}, Acc_clip: {:.3f}, Acc_id1: {:.3f}, Acc_id2: {:.3f}, Base Lr: {:.2e}" .format(epoch, (n_iter + 1), len(train_loader_stage2), loss_meter.avg, acc_meter.avg, acc_meter_id1.avg, acc_meter_id2.avg, scheduler.get_lr()[0])) scheduler.step() end_time = time.time() time_per_batch = (end_time - start_time) / (n_iter + 1) if cfg.MODEL.DIST_TRAIN: pass else: logger.info("Epoch {} done. 
Time per batch: {:.3f}[s] Speed: {:.1f}[samples/s]" .format(epoch, time_per_batch, train_loader_stage2.batch_size / time_per_batch)) if epoch % eval_period == 0: if cfg.MODEL.DIST_TRAIN: if dist.get_rank() == 0: model.eval() for n_iter, (img, vid, camid, camids, target_view, _) in enumerate(val_loader): with torch.no_grad(): img = img.to(device) if cfg.MODEL.SIE_CAMERA: camids = camids.to(device) else: camids = None if cfg.MODEL.SIE_VIEW: target_view = target_view.to(device) else: target_view = None feat = model(img, cam_label=camids, view_label=target_view) evaluator.update((feat, vid, camid)) cmc, mAP, _, _, _, _, _ = evaluator.compute() logger.info("Validation Results - Epoch: {}".format(epoch)) logger.info("mAP: {:.1%}".format(mAP)) for r in [1, 5, 10, 20]: logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(r, cmc[r - 1])) torch.cuda.empty_cache() else: model.eval() for n_iter, (img, vid, camid, camids, target_view, _) in enumerate(val_loader): with torch.no_grad(): img = img.to(device) if cfg.MODEL.SIE_CAMERA: camids = camids.to(device) else: camids = None if cfg.MODEL.SIE_VIEW: target_view = target_view.to(device) else: target_view = None feat = model(img, cam_label=camids, view_label=target_view) evaluator.update((feat, vid, camid)) cmc, mAP, _, _, _, _, _ = evaluator.compute() logger.info("Validation Results - Epoch: {}".format(epoch)) logger.info("mAP: {:.1%}".format(mAP)) for r in [1, 5, 10, 20]: logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(r, cmc[r - 1])) torch.cuda.empty_cache() prec1 = cmc[0] + mAP is_best = prec1 > best_performance best_performance = max(prec1, best_performance) if is_best: best_epoch = epoch
def do_train_stage2(cfg, model, center_criterion, train_loader_stage1, train_loader_stage2, val_loader, optimizer, optimizer_center, scheduler, loss_fn, num_query, local_rank,num_classes): log_period = cfg.SOLVER.STAGE2.LOG_PERIOD eval_period = cfg.SOLVER.STAGE2.EVAL_PERIOD device = "cuda" epochs = cfg.SOLVER.STAGE2.MAX_EPOCHS logger = logging.getLogger("TFCLIP.train") logger.info('start training') _LOCAL_PROCESS_GROUP = None if device: model.to(local_rank) if torch.cuda.device_count() > 1 and cfg.MODEL.DIST_TRAIN: print('Using {} GPUs for training'.format(torch.cuda.device_count())) model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank], find_unused_parameters=True) loss_meter = AverageMeter() acc_meter = AverageMeter() acc_meter_id1 = AverageMeter() acc_meter_id2 = AverageMeter() evaluator = R1_mAP_eval(num_query, max_rank=50, feat_norm=cfg.TEST.FEAT_NORM) scaler = amp.GradScaler() xent_frame = CrossEntropyLabelSmooth(num_classes=num_classes) @torch.no_grad() def generate_cluster_features(labels, features): centers = collections.defaultdict(list) for i, label in enumerate(labels): if label == -1: continue centers[labels[i]].append(features[i]) centers = [ torch.stack(centers[idx], dim=0).mean(0) for idx in sorted(centers.keys()) ] centers = torch.stack(centers, dim=0) return centers # train all_start_time = time.monotonic() ####### 1.CLIP-Memory module #################### print("=> Automatically generating CLIP-Memory (might take a while, have a coffe)") image_features = [] labels = [] with torch.no_grad(): for n_iter, (img, vid, target_cam, target_view) in enumerate(train_loader_stage1): img = img.to(device) # torch.Size([64, 4, 3, 256, 128]) target = vid.to(device) # torch.Size([64]) if len(img.size()) == 6: # method = 'dense' b, n, s, c, h, w = img.size() assert (b == 1) img = img.view(b * n, s, c, h, w) # torch.Size([5, 8, 3, 256, 128]) with amp.autocast(enabled=True): image_feature = model(img, get_image = True) image_feature = image_feature.view(-1, image_feature.size(1)) image_feature = torch.mean(image_feature, 0, keepdim=True) # 1,512 for i, img_feat in zip(target, image_feature): labels.append(i) image_features.append(img_feat.cpu()) else: with amp.autocast(enabled=True): image_feature = model(img, get_image = True) for i, img_feat in zip(target, image_feature): labels.append(i) image_features.append(img_feat.cpu()) labels_list = torch.stack(labels, dim=0).cuda() # N torch.Size([8256]) image_features_list = torch.stack(image_features, dim=0).cuda() # torch.Size([8256, 512]) cluster_features = generate_cluster_features(labels_list.cpu().numpy(), image_features_list).detach() best_performance = 0.0 best_epoch = 1 for epoch in range(1, epochs + 1): start_time = time.time() loss_meter.reset() acc_meter.reset() acc_meter_id1.reset() acc_meter_id2.reset() evaluator.reset() model.train() for n_iter, (img, vid, target_cam, target_view) in enumerate(train_loader_stage2): optimizer.zero_grad() optimizer_center.zero_grad() img = img.to(device) target = vid.to(device) if cfg.MODEL.SIE_CAMERA: target_cam = target_cam.to(device) else: target_cam = None if cfg.MODEL.SIE_VIEW: target_view = target_view.to(device) else: target_view = None with amp.autocast(enabled=True): B, T, C, H, W = img.shape # B=64, T=4.C=3 H=256,W=128 score, feat, logits1 = model(x = img, cam_label=target_cam, view_label=target_view, text_features2=cluster_features) score1 = score[0:3] score2 = score[3] if (n_iter + 1) % log_period == 0: loss1 = loss_fn(score1, feat, target, target_cam, logits1, 
isprint=True) else: loss1 = loss_fn(score1, feat, target, target_cam, logits1) targetX = target.unsqueeze(1) # 12,1 => [94 94 10 10 15 15 16 16 75 75 39 39] targetX = targetX.expand(B, T) # 12,8 => [ [94...94][94...94][10...10][10...10] ... [39...39] [39...39]] targetX = targetX.contiguous() targetX = targetX.view(B * T, -1) # 96 => [94...94 10...10 15...15 16...16 75...75 39...39] targetX = targetX.squeeze(1) loss_frame = xent_frame(score2, targetX) loss = loss1 + loss_frame / T scaler.scale(loss).backward() scaler.step(optimizer) scaler.update() if 'center' in cfg.MODEL.METRIC_LOSS_TYPE: for param in center_criterion.parameters(): param.grad.data *= (1. / cfg.SOLVER.CENTER_LOSS_WEIGHT) scaler.step(optimizer_center) scaler.update() acc1 = (logits1.max(1)[1] == target).float().mean() acc_id1 = (score[0].max(1)[1] == target).float().mean() acc_id2 = (score[3].max(1)[1] == targetX).float().mean() loss_meter.update(loss.item(), img.shape[0]) acc_meter.update(acc1, 1) acc_meter_id1.update(acc_id1, 1) acc_meter_id2.update(acc_id2, 1) torch.cuda.synchronize() if (n_iter + 1) % log_period == 0: logger.info( "Epoch[{}] Iteration[{}/{}] Loss: {:.3f}, Acc_clip: {:.3f}, Acc_id1: {:.3f}, Acc_id2: {:.3f}, Base Lr: {:.2e}" .format(epoch, (n_iter + 1), len(train_loader_stage2), loss_meter.avg, acc_meter.avg, acc_meter_id1.avg, acc_meter_id2.avg, scheduler.get_lr()[0])) scheduler.step() end_time = time.time() time_per_batch = (end_time - start_time) / (n_iter + 1) if cfg.MODEL.DIST_TRAIN: pass else: logger.info("Epoch {} done. Time per batch: {:.3f}[s] Speed: {:.1f}[samples/s]" .format(epoch, time_per_batch, train_loader_stage2.batch_size / time_per_batch)) if epoch % eval_period == 0: if cfg.MODEL.DIST_TRAIN: if dist.get_rank() == 0: model.eval() for n_iter, (img, vid, camid, camids, target_view, _) in enumerate(val_loader): with torch.no_grad(): img = img.to(device) if cfg.MODEL.SIE_CAMERA: camids = camids.to(device) else: camids = None if cfg.MODEL.SIE_VIEW: target_view = target_view.to(device) else: target_view = None feat = model(img, cam_label=camids, view_label=target_view) evaluator.update((feat, vid, camid)) cmc, mAP, _, _, _, _, _ = evaluator.compute() logger.info("Validation Results - Epoch: {}".format(epoch)) logger.info("mAP: {:.1%}".format(mAP)) for r in [1, 5, 10, 20]: logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(r, cmc[r - 1])) torch.cuda.empty_cache() else: model.eval() for n_iter, (img, vid, camid, camids, target_view, _) in enumerate(val_loader): with torch.no_grad(): img = img.to(device) if cfg.MODEL.SIE_CAMERA: camids = camids.to(device) else: camids = None if cfg.MODEL.SIE_VIEW: target_view = target_view.to(device) else: target_view = None feat = model(img, cam_label=camids, view_label=target_view) evaluator.update((feat, vid, camid)) cmc, mAP, _, _, _, _, _ = evaluator.compute() logger.info("Validation Results - Epoch: {}".format(epoch)) logger.info("mAP: {:.1%}".format(mAP)) for r in [1, 5, 10, 20]: logger.info("CMC curve, Rank-{:<3}:{:.1%}".format(r, cmc[r - 1])) torch.cuda.empty_cache() prec1 = cmc[0] + mAP is_best = prec1 > best_performance best_performance = max(prec1, best_performance) if is_best: best_epoch = epoch
save_checkpoint(model.state_dict(), is_best, os.path.join(cfg.OUTPUT_DIR, 'checkpoint_ep.pth.tar'))
2
2023-12-11 04:03:46+00:00
8k
nexB/dejacode
dje/filters.py
[ { "identifier": "Dataspace", "path": "dje/models.py", "snippet": "class Dataspace(models.Model):\n \"\"\"\n The Dataspace is a way to keep data for each organization data\n separated and still store them in the same database, schema or table.\n Therefore the Dataspace is part of the primary key of most models\n and it part of a unicity constraint for these models.\n For a given installation there can be several Owner Org defined, but only\n one reference.\n\n This is an important concept used throughout DejaCode to\n separate the reference data provided by nexB from the data used in a given\n installation of DJE.\n\n It is essentially a notion of tenant in a DJE installation and is used to\n segregate org-specific and/or org-private records enabling both\n multi-tenancy as well as nexB-provided reference data and org-specific or\n customized data.\n\n This separation has several purposes such as allowing:\n * orderly and simpler data update from the nexB reference data and inter\n Dataspace data exchange\n * Dataspace specific data customizations (for instance license\n tags configurations or some preferences)\n * multi-tenancy where different organizations can share the same DJE\n instance\n \"\"\"\n\n uuid = models.UUIDField(\n _(\"UUID\"),\n default=uuid.uuid4,\n editable=False,\n unique=True,\n )\n\n name = models.SlugField(\n unique=True,\n max_length=20,\n help_text=_(\n 'Unique name of a Dataspace. The name \"nexB\" is reserved for '\n \"the creators/maintainers of the system software. Dataspace name \"\n \"only allows letters, numbers, underscores and hyphens.\"\n ),\n )\n\n homepage_url = models.URLField(\n _(\"Homepage URL\"),\n max_length=1024,\n blank=True,\n help_text=_(\"The homepage URL of the Dataspace owner.\"),\n )\n\n contact_info = models.CharField(\n _(\"Contact information\"),\n max_length=500,\n blank=True,\n help_text=_(\n \"A dedicated email address or URL for contacting the owner of \"\n \"the Dataspace. 
Can be used for Attribution Package generation.\"\n ),\n )\n\n notes = models.TextField(\n blank=True,\n help_text=_(\"Extended Notes about a Dataspace.\"),\n )\n\n show_license_profile_in_license_list_view = models.BooleanField(\n default=False,\n verbose_name=format_lazy(\n \"Show {license_profile} in license list view\",\n license_profile=_(\"license profile\"),\n ),\n help_text=format_lazy(\n \"When true (checked), include the {license_profile} column in the license list view.\",\n license_profile=_(\"license profile\"),\n ),\n )\n\n show_license_type_in_license_list_view = models.BooleanField(\n default=True,\n help_text=_(\n \"When true (checked), include the license type column in the license list view.\",\n ),\n )\n\n show_spdx_short_identifier_in_license_list_view = models.BooleanField(\n verbose_name=_(\"show SPDX short identifier in license list view\"),\n default=False,\n help_text=_(\n \"When true (checked), include the SPDX short identifier in the license list view.\",\n ),\n )\n\n show_usage_policy_in_user_views = models.BooleanField(\n default=True,\n help_text=_(\n \"When true (checked), include the usage policy in user views that \"\n \"show licenses or components.\",\n ),\n )\n\n show_type_in_component_list_view = models.BooleanField(\n default=False,\n help_text=_(\n \"When true (checked), include the type column in the component list view.\",\n ),\n )\n\n hide_empty_fields_in_component_details_view = models.BooleanField(\n default=False,\n help_text=_(\"When true (checked), hide empty fields in the component details view.\"),\n )\n\n set_usage_policy_on_new_component_from_licenses = models.BooleanField(\n _(\"set usage policy on component or package from license policy\"),\n default=False,\n help_text=_(\n \"When true (checked), the application will automatically assign a usage \"\n \"policy to a component or package when its license expression is set or \"\n \"updated when you create, import, edit, or copy that component or package, \"\n \"based on the associated policies that you have defined on the license policy.\"\n ),\n )\n\n logo_url = models.URLField(\n _(\"Logo URL\"),\n max_length=1024,\n blank=True,\n help_text=_(\"URL to a Dataspace Logo. If set, it will be included in reports.\"),\n )\n\n full_name = models.CharField(\n max_length=100,\n blank=True,\n help_text=_(\n \"The full name of the Dataspace organization. \"\n \"Can be used for Attribution Package generation.\"\n ),\n )\n\n address = models.TextField(\n blank=True,\n help_text=(\n \"The address of the Dataspace organization. \"\n \"Can be used for Attribution Package generation.\"\n ),\n )\n\n open_source_information_url = models.URLField(\n _(\"Open Source Information URL\"),\n max_length=1024,\n blank=True,\n help_text=_(\n \"A public URL where you publish information about the Dataspace \"\n \"organization's Open Source policies and procedures. \"\n \"Can be used for Attribution Package generation.\"\n ),\n )\n\n open_source_download_url = models.URLField(\n _(\"Open Source Download URL\"),\n max_length=1024,\n blank=True,\n help_text=_(\n \"A public URL where you provide copies of Open Source software that \"\n \"require Redistribution when you use them in your products. 
Can be \"\n \"used for Attribution Package generation.\"\n ),\n )\n\n home_page_announcements = models.TextField(\n blank=True,\n help_text=_(\n \"Use this field to enter text to appear on the DejaCode home page, \"\n \"normally for the purpose of providing your user community with \"\n \"general-purpose announcements about using DejaCode. \"\n \"Note that you can include URL's in the text if you want to direct \"\n \"users to detailed instructions and announcements.\"\n ),\n )\n\n enable_package_scanning = models.BooleanField(\n default=False,\n help_text=_(\n 'When true (checked), allows a user to click the \"Scan Package\" button when viewing '\n \"a Package, initiating a call to ScanCode.io to scan the Package based on its URL. \"\n \"This setting also activates a DejaCode feature to submit any Package created using \"\n 'the \"Add Package\" button to ScanCode.io for scanning, and it activates the Scans '\n \"choice from the DejaCode Tools dropdown menu.\"\n ),\n )\n\n update_packages_from_scan = models.BooleanField(\n _(\"Update packages automatically from scan\"),\n default=False,\n help_text=_(\n \"When true (checked), enables an automatic DejaCode process to update \"\n \"selected Package fields (such as license expression, primary language, \"\n \"copyright, etc.) when a package scan is completed, depending on the \"\n \"quality of the scan results.\"\n ),\n )\n\n enable_purldb_access = models.BooleanField(\n _(\"Enable PurlDB access\"),\n default=False,\n help_text=_(\n \"When true (checked), enables user access to the PurlDB option from the Tools menu, \"\n \"which presents a list of PurlDB data mined and scanned automatically from multiple \"\n \"public sources. Users can view PurlDB details and can create DejaCode Package \"\n \"definitions using those details, and DejaCode also presents a new PurlDB tab when \"\n \"viewing the details of a Package with matching key values. 
This option also enhances \"\n \"the Global Search feature to extend the search scope beyond the standard DejaCode \"\n \"objects (Packages, Components, Licenses, Owners) and perform an asynchronous query of \"\n \"the PurlDB to find relevant data.\"\n ),\n )\n\n enable_vulnerablecodedb_access = models.BooleanField(\n _(\"Enable VulnerableCodeDB access\"),\n default=False,\n help_text=_(\n \"When true (checked), authorizes DejaCode to access the VulnerableCodeDB \"\n \"using a Package URL (purl) to determine if there are any reported \"\n \"vulnerabilities for a specific Package and return the Vulnerability ID \"\n \"and related URLs to a Vulnerabilities tab in the Package details user \"\n \"view.\"\n ),\n )\n\n objects = DataspaceManager()\n\n class Meta:\n ordering = [\"name\"]\n\n def __str__(self):\n return self.name\n\n def get_admin_url(self):\n opts = self._meta\n viewname = f\"admin:{opts.app_label}_{opts.model_name}_change\"\n return reverse(viewname, args=[self.pk])\n\n def natural_key(self):\n return (self.name,)\n\n @cached_property\n def is_reference(self):\n \"\"\"Return True if this Dataspace is the reference.\"\"\"\n reference = self.__class__._default_manager.get_reference()\n return True if reference and self == reference else False\n\n def get_configuration(self, field_name=None):\n \"\"\"\n Return the associated DataspaceConfiguration.\n If a `field_name` is provided, Return the value for that field from\n the `DataspaceConfiguration`.\n \"\"\"\n try:\n configuration = self.configuration\n except ObjectDoesNotExist:\n return\n\n if field_name:\n return getattr(configuration, field_name, None)\n return configuration\n\n @property\n def has_configuration(self):\n \"\"\"Return True if an associated DataspaceConfiguration instance exists.\"\"\"\n return bool(self.get_configuration())\n\n @property\n def tab_permissions_enabled(self):\n return bool(self.get_configuration(\"tab_permissions\"))" }, { "identifier": "History", "path": "dje/models.py", "snippet": "class History(models.Model):\n ADDITION = ADDITION\n CHANGE = CHANGE\n DELETION = DELETION\n\n ACTION_FLAG_CHOICES = (\n (ADDITION, _(\"Addition\")),\n (CHANGE, _(\"Change\")),\n (DELETION, _(\"Deletion\")),\n )\n\n object_dataspace = models.ForeignKey(\n to=\"dje.Dataspace\",\n on_delete=models.CASCADE,\n null=True,\n blank=True,\n editable=False,\n )\n\n serialized_data = models.TextField(\n null=True,\n blank=True,\n editable=False,\n help_text=_(\"Serialized data of the instance just before this change.\"),\n )\n\n # The following fields are directly taken from django.contrib.admin.models.LogEntry\n # Since the LogEntry is not abstract we cannot properly inherit from it.\n\n action_time = models.DateTimeField(\n _(\"action time\"),\n default=timezone.now,\n editable=False,\n )\n\n user = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n models.CASCADE,\n verbose_name=_(\"user\"),\n )\n\n content_type = models.ForeignKey(\n ContentType,\n models.SET_NULL,\n verbose_name=_(\"content type\"),\n blank=True,\n null=True,\n )\n\n object_id = models.TextField(\n _(\"object id\"),\n blank=True,\n null=True,\n )\n\n object_repr = models.CharField(\n _(\"object repr\"),\n max_length=200,\n )\n\n action_flag = models.PositiveSmallIntegerField(\n _(\"action flag\"),\n choices=ACTION_FLAG_CHOICES,\n )\n\n # change_message is either a string or a JSON structure\n change_message = models.TextField(\n _(\"change message\"),\n blank=True,\n )\n\n objects = HistoryManager()\n\n class Meta:\n verbose_name = _(\"history entry\")\n 
verbose_name_plural = _(\"history entries\")\n ordering = (\"-action_time\",)\n\n # Clone the method from Django's LogEntry model.\n __repr__ = LogEntry.__repr__\n __str__ = LogEntry.__str__\n is_addition = LogEntry.is_addition\n is_change = LogEntry.is_change\n is_deletion = LogEntry.is_deletion\n get_change_message = LogEntry.get_change_message\n get_edited_object = LogEntry.get_edited_object\n get_admin_url = LogEntry.get_edited_object\n\n @classmethod\n def log_addition(cls, user, obj, message=None):\n \"\"\"Create History entry on Addition with the proper `change_message`.\"\"\"\n if not message:\n message = [{\"added\": {}}]\n\n return cls.objects.log_action(user, obj, cls.ADDITION, message)\n\n @classmethod\n def log_change(cls, user, obj, message, serialized_data=None):\n \"\"\"Create History entry on Change.\"\"\"\n return cls.objects.log_action(user, obj, cls.CHANGE, message, serialized_data)\n\n @classmethod\n def log_deletion(cls, user, obj):\n \"\"\"\n Create History entry on Deletion.\n Include the serialized_data if `as_json()` is available on the model class.\n \"\"\"\n serialized_data = None\n with suppress(AttributeError):\n serialized_data = obj.as_json()\n\n return cls.objects.log_action(user, obj, cls.DELETION, serialized_data=serialized_data)" }, { "identifier": "is_dataspace_related", "path": "dje/models.py", "snippet": "def is_dataspace_related(model_class):\n \"\"\"\n Return True if the given model_class has a ForeignKey field related to\n the Dataspace model.\n \"\"\"\n return any(\n 1\n for f in model_class._meta.get_fields()\n if f.many_to_one and (f.related_model == Dataspace or f.related_model == \"dje.Dataspace\")\n )" }, { "identifier": "is_secured", "path": "dje/models.py", "snippet": "def is_secured(manager):\n \"\"\"Return True if the `is_secured` attribute is set to True.\"\"\"\n if not issubclass(manager.__class__, models.Manager):\n raise AssertionError\n return getattr(manager, \"is_secured\", False)" }, { "identifier": "database_re_escape", "path": "dje/utils.py", "snippet": "def database_re_escape(pattern):\n \"\"\"Escape special char for compatibility with the QuerySet `regex` filter.\"\"\"\n re_special_char = frozenset(\"!$()*+.:<=>?[]^{|}-\")\n return \"\".join([\"\\\\\" + c if c in re_special_char else c for c in pattern])" }, { "identifier": "extract_name_version", "path": "dje/utils.py", "snippet": "def extract_name_version(name_version_str):\n \"\"\"\n Return a name and a version extracted from the following syntax: 'name:version'\n Note that colons `:` characters are allowed in the name but not in the version.\n \"\"\"\n if not name_version_str or \":\" not in name_version_str:\n raise SyntaxError\n\n name, _, version = name_version_str.rpartition(\":\")\n return name, version" }, { "identifier": "get_uuids_list_sorted", "path": "dje/utils.py", "snippet": "def get_uuids_list_sorted(dataspace_id, model_class):\n \"\"\"\n Return a sorted list of uuids for a given `model_class`, limited to the\n given `dataspace_id`.\n \"\"\"\n return (\n model_class.objects.scope_by_id(dataspace_id)\n .order_by(\"uuid\")\n .values_list(\"uuid\", flat=True)\n )" }, { "identifier": "remove_field_from_query_dict", "path": "dje/utils.py", "snippet": "def remove_field_from_query_dict(query_dict, field_name, remove_value=None):\n \"\"\"\n Return an encoded URL without the value for given `field_name`.\n For multi-value filters, a single value can be removed using `remove_value`.\n This URL can be used to remove a filter value from the active filters.\n \"\"\"\n if 
not query_dict:\n return \"\"\n\n data = query_dict.copy()\n field_data = data.pop(field_name, [])\n\n if remove_value and len(field_data) > 1 and remove_value in field_data:\n for item in field_data:\n if item != remove_value:\n data.update({field_name: item})\n\n return data.urlencode()" } ]
import datetime
import json
import operator
import uuid
import django_filters
from functools import reduce
from django.contrib import messages
from django.contrib.admin import filters
from django.contrib.admin.options import IncorrectLookupParameters
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from django.contrib.postgres.search import SearchQuery
from django.contrib.postgres.search import SearchRank
from django.contrib.postgres.search import SearchVector
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.db.models import Case
from django.db.models import IntegerField
from django.db.models import Q
from django.db.models import Value
from django.db.models import When
from django.forms import widgets
from django.forms.fields import MultipleChoiceField
from django.urls import reverse
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from dje.models import Dataspace
from dje.models import History
from dje.models import is_dataspace_related
from dje.models import is_secured
from dje.utils import database_re_escape
from dje.utils import extract_name_version
from dje.utils import get_uuids_list_sorted
from dje.utils import remove_field_from_query_dict
6,158
super().__init__(*args, **kwargs) def filter(self, qs, value): value = value or () # Make sure we have an iterable # Even though not a noop, no point filtering if empty if not value: return qs q = Q() for v in set(value): try: name, version = extract_name_version(v) except SyntaxError: pass else: q |= Q(**{self.name_field_name: name, self.version_field_name: version}) if self.distinct: return self.get_method(qs)(q).distinct() return self.get_method(qs)(q) class BooleanChoiceFilter(django_filters.ChoiceFilter): def __init__(self, *args, **kwargs): kwargs["empty_label"] = kwargs.pop("empty_label", "All") kwargs["choices"] = kwargs.pop( "choices", ( ("yes", _("Yes")), ("no", _("No")), ), ) super().__init__(*args, **kwargs) def filter(self, qs, value): boolean_value = {"yes": True, "no": False}.get(value) if boolean_value is not None: return qs.filter(**{self.field_name: boolean_value}).distinct() return qs class ChoicesOnlyListFilterMixin: """Remove the 'All' choice from SimpleListFilter.choices()""" def choices(self, cl): for lookup, title in self.lookup_choices: yield { "selected": str(self.value()) == str(lookup), "query_string": cl.get_query_string( { self.parameter_name: lookup, }, [], ), "display": title, } class BaseDataspaceLookupsFilter(filters.SimpleListFilter): def lookups(self, request, model_admin): user_dataspace = request.user.dataspace reference_dataspace = Dataspace.objects.get_reference() if user_dataspace == reference_dataspace: dataspaces = Dataspace.objects.all() else: dataspaces = [user_dataspace] if reference_dataspace: dataspaces.append(reference_dataspace) return [(dataspace.id, dataspace.name) for dataspace in dataspaces] class DataspaceFilter(ChoicesOnlyListFilterMixin, BaseDataspaceLookupsFilter): """ Scope the ChangeList results by a Dataspace. Default is the current User Dataspace. Anyone can look into reference Dataspace. Only Reference User can look into other Dataspaces. """ title = _("dataspace") parameter_name = "dataspace__id__exact" def lookups(self, request, model_admin): """Set the lookup value for the current user dataspace choice to None.""" lookups = super().lookups(request, model_admin) return [(None if name == request.user.dataspace.name else pk, name) for pk, name in lookups] def queryset(self, request, queryset): if self.value(): return queryset.scope_by_id(self.value()) return queryset.scope(request.user.dataspace) class MissingInFilter(BaseDataspaceLookupsFilter): """ Filter by objects missing in the given dataspace, compared with the current `DataspaceFilter.parameter_name` or user dataspace. Both values for reference and target Dataspace are validated against the self.lookup_choices to make sure the user has the proper access permissions. This filter is only available to superusers, this is enforced in DataspacedAdmin.get_list_filter() """ title = _("missing in") parameter_name = "missing_in" def queryset(self, request, queryset): if not self.value(): return valid_choices = [str(choice) for choice, _ in self.lookup_choices] if str(self.value()) not in valid_choices: raise IncorrectLookupParameters()
# # Copyright (c) nexB Inc. and others. All rights reserved. # DejaCode is a trademark of nexB Inc. # SPDX-License-Identifier: AGPL-3.0-only # See https://github.com/nexB/dejacode for support or download. # See https://aboutcode.org for more information about AboutCode FOSS projects. # IS_FILTER_LOOKUP_VAR = "_filter_lookup" class FilterSetUtilsMixin: def is_active(self): """Return True if any of the filter is active, except the 'sort' filter.""" return bool( [field_name for field_name in self.form.changed_data if field_name not in ["sort"]] ) def get_query_no_sort(self): return remove_field_from_query_dict(self.data, "sort") def get_filters_breadcrumbs(self): return [ { "label": self.filters[field_name].label, "value": value, "remove_url": remove_field_from_query_dict(self.data, field_name, value), } for field_name in self.form.changed_data for value in self.data.getlist(field_name) ] class DataspacedFilterSet(FilterSetUtilsMixin, django_filters.FilterSet): related_only = [] def __init__(self, *args, **kwargs): try: self.dataspace = kwargs.pop("dataspace") except KeyError: raise AttributeError("A dataspace needs to be provided to this FilterSet.") self.dynamic_qs = kwargs.pop("dynamic_qs", True) self.parent_qs_cache = {} super().__init__(*args, **kwargs) for field_name, filter_ in self.filters.items(): # Dataspace scoping for FKs on DataspaceRelated models. if hasattr(filter_, "queryset") and is_dataspace_related(filter_.queryset.model): filter_.queryset = filter_.queryset.scope(self.dataspace) if field_name in self.related_only: self.apply_related_only(field_name, filter_) usage_policy = self.filters.get("usage_policy") if usage_policy: model_name = self._meta.model._meta.model_name usage_policy.queryset = usage_policy.queryset.filter(content_type__model=model_name) def apply_related_only(self, field_name, filter_): """ Limit the filter choices to the values used on the parent queryset. This logic emulate a facets logic. See also `django.contrib.admin.filters.RelatedOnlyFieldListFilter`. """ parent_qs = self.get_parent_qs_for_related_only(field_name) is_related_field = hasattr(filter_, "queryset") if is_related_field: # FK type fields filter_.queryset = filter_.queryset.distinct().filter( pk__in=parent_qs.values_list(f"{field_name}__pk", flat=True) ) else: # Choices type fields choices_qs = ( parent_qs.order_by(field_name).distinct().values_list(field_name, flat=True) ) filter_.extra["choices"] = [ choice for choice in filter_.extra["choices"] if choice[0] in choices_qs ] def get_parent_qs_for_related_only(self, field_name): """ Return the parent QuerySet with active filters applied except for the given `filter_name`. The model default manager is used in place of the self.queryset since it do not containing the annotations and select/prefetch_related that are not needed for that dynamic filtering. """ parent_qs = self._meta.model._default_manager.scope(self.dataspace) if not self.dynamic_qs: return parent_qs data = self.data.copy() # `sort` is only used for ordering and does not apply here. # Removing it from the queryset improves the performances. 
fields_to_remove = [ "sort", field_name, ] for name in fields_to_remove: data.pop(name, None) if not data: return parent_qs cache_key = json.dumps(data, sort_keys=True) cached_qs = self.parent_qs_cache.get(cache_key, None) if cached_qs: return cached_qs filterset = self.__class__( data=data, dataspace=self.dataspace, queryset=parent_qs, dynamic_qs=False, ) self.parent_qs_cache[cache_key] = filterset.qs return filterset.qs class SearchFilter(django_filters.CharFilter): def __init__(self, search_fields, *args, **kwargs): super().__init__(*args, **kwargs) self.search_fields = search_fields def filter(self, qs, value): lookup_type = "icontains" for bit in value.split(): or_queries = [ models.Q(**{f"{field}__{lookup_type}": bit}) for field in self.search_fields ] qs = qs.filter(reduce(operator.or_, or_queries)) return qs class SearchRankFilter(SearchFilter): """ Search on multiple fields using django.contrib.postgres.search module capabilities. For better performance, all given `search_fields` should be indexed (db_index=True). """ def __init__(self, min_rank=0.01, *args, **kwargs): super().__init__(*args, **kwargs) self.min_rank = min_rank def filter(self, qs, value): if not value: return qs vector = SearchVector(*self.search_fields) query = SearchQuery(value) default_ordering = qs.model._meta.ordering qs = ( qs.annotate(rank=SearchRank(vector, query)) .filter(rank__gte=self.min_rank) .order_by("-rank", *default_ordering) ) return qs.distinct() if self.distinct else qs class MatchOrderedSearchFilter(SearchRankFilter): """ Start with a case-insensitive containment search on the `name` field, ordering based on the match type using annotations. If that simple search Return nothing, fallback to the SearchRankFilter searching, this allows "name version" type string to return some results. Postgres pattern matching docs available at: https://www.postgresql.org/docs/10/static/functions-matching.html#POSIX-CONSTRAINT-ESCAPES-TABLE """ def __init__(self, match_order_fields, *args, **kwargs): super().__init__(*args, **kwargs) self.match_order_fields = match_order_fields def get_match_order_lookups(self, lookup_type, value): or_queries = [ models.Q(**{f"{field}__{lookup_type}": value}) for field in self.match_order_fields ] return reduce(operator.or_, or_queries) def filter(self, qs, value): if not value: return qs # \y matches only at the beginning or end of a word regex_escaped_value = r"\y{}\y".format(database_re_escape(value)) # All matching patterns are applied case-insensitive match_order = Case( # 1. Exact match When(self.get_match_order_lookups("iexact", value), then=Value(1)), # 2. Contains word with boundaries When(self.get_match_order_lookups("iregex", regex_escaped_value), then=Value(2)), # 3. 
Contains word default=Value(3), # default `icontains` clause in `.filter()` output_field=IntegerField(), ) default_ordering = self.model._meta.ordering simple_search_qs = ( qs.filter(self.get_match_order_lookups("icontains", value)) .annotate(match_order=match_order) .order_by("match_order", *default_ordering) ) if simple_search_qs.exists(): if self.distinct: simple_search_qs = simple_search_qs.distinct() return simple_search_qs return super().filter(qs, value) class ProgressiveTextSearchFilter(SearchRankFilter): """Start with a icontains search before falling back on a ranking search.""" def filter(self, qs, value): if not value: return qs if len(self.search_fields) != 1: raise ImproperlyConfigured(f"Only 1 field supported for {self.__class__}") search_field = self.search_fields[0] contains_search_qs = qs.filter(**{f"{search_field}__icontains": value}) if list(contains_search_qs): return contains_search_qs vector = SearchVector(search_field) query = SearchQuery(value) return ( qs.annotate(rank=SearchRank(vector, query)) .filter(rank__gte=self.min_rank) .order_by("-rank") ) class DefaultOrderingFilter(django_filters.OrderingFilter): """Add default ordering from model meta after the provided value.""" def filter(self, qs, value): qs = super().filter(qs, value) ordering = qs.query.order_by if not ordering: return qs # Add the default ordering from the model and override the order_by value for field_name in self.model._meta.ordering: if field_name not in ordering: ordering += (field_name,) return qs.order_by(*ordering) class CharMultipleWidget(widgets.TextInput): """ Enable the support for `MultiValueDict` `?field=a&field=b` reusing the `SelectMultiple.value_from_datadict()` but render as a `TextInput`. """ def value_from_datadict(self, data, files, name): value = widgets.SelectMultiple().value_from_datadict(data, files, name) if not value or value == [""]: return "" return value def format_value(self, value): """Return a value as it should appear when rendered in a template.""" return ", ".join(value) class MultipleCharField(MultipleChoiceField): widget = CharMultipleWidget def valid_value(self, value): return True class MultipleCharFilter(django_filters.MultipleChoiceFilter): """Filter on multiple values for a CharField type using `?field=a&field=b` URL syntax.""" field_class = MultipleCharField class MultipleUUIDField(MultipleChoiceField): widget = CharMultipleWidget def valid_value(self, value): try: uuid.UUID(value) except ValueError: return False return True class MultipleUUIDFilter(django_filters.MultipleChoiceFilter): """Filter on multiple values for an `UUIDField` type using `?field=a&field=b` URL syntax.""" help_text = "Exact UUID. Multi-value supported." field_class = MultipleUUIDField def __init__(self, *args, **kwargs): kwargs.setdefault("help_text", self.help_text) super().__init__(*args, **kwargs) class LastModifiedDateFilter(django_filters.DateTimeFilter): help_text = ( "Limits to records created or updated since that date. " 'Supports both "YYYY-MM-DD" date and "YYYY-MM-DD HH:MM" datetime.' ) def __init__(self, *args, **kwargs): kwargs.setdefault("help_text", self.help_text) kwargs["lookup_expr"] = "gte" super().__init__(*args, **kwargs) class NameVersionFilter(MultipleCharFilter): """ Filter by `name:version` syntax. Supports multiple values: `?name_version=Name:Version&name_version=Name:Version` """ help_text = ( 'Exact match on name/version using the syntax "name:version". Multi-value supported.' 
) def __init__(self, *args, **kwargs): kwargs.setdefault("help_text", self.help_text) self.name_field_name = kwargs.pop("name_field_name", "name") self.version_field_name = kwargs.pop("version_field_name", "version") super().__init__(*args, **kwargs) def filter(self, qs, value): value = value or () # Make sure we have an iterable # Even though not a noop, no point filtering if empty if not value: return qs q = Q() for v in set(value): try: name, version = extract_name_version(v) except SyntaxError: pass else: q |= Q(**{self.name_field_name: name, self.version_field_name: version}) if self.distinct: return self.get_method(qs)(q).distinct() return self.get_method(qs)(q) class BooleanChoiceFilter(django_filters.ChoiceFilter): def __init__(self, *args, **kwargs): kwargs["empty_label"] = kwargs.pop("empty_label", "All") kwargs["choices"] = kwargs.pop( "choices", ( ("yes", _("Yes")), ("no", _("No")), ), ) super().__init__(*args, **kwargs) def filter(self, qs, value): boolean_value = {"yes": True, "no": False}.get(value) if boolean_value is not None: return qs.filter(**{self.field_name: boolean_value}).distinct() return qs class ChoicesOnlyListFilterMixin: """Remove the 'All' choice from SimpleListFilter.choices()""" def choices(self, cl): for lookup, title in self.lookup_choices: yield { "selected": str(self.value()) == str(lookup), "query_string": cl.get_query_string( { self.parameter_name: lookup, }, [], ), "display": title, } class BaseDataspaceLookupsFilter(filters.SimpleListFilter): def lookups(self, request, model_admin): user_dataspace = request.user.dataspace reference_dataspace = Dataspace.objects.get_reference() if user_dataspace == reference_dataspace: dataspaces = Dataspace.objects.all() else: dataspaces = [user_dataspace] if reference_dataspace: dataspaces.append(reference_dataspace) return [(dataspace.id, dataspace.name) for dataspace in dataspaces] class DataspaceFilter(ChoicesOnlyListFilterMixin, BaseDataspaceLookupsFilter): """ Scope the ChangeList results by a Dataspace. Default is the current User Dataspace. Anyone can look into reference Dataspace. Only Reference User can look into other Dataspaces. """ title = _("dataspace") parameter_name = "dataspace__id__exact" def lookups(self, request, model_admin): """Set the lookup value for the current user dataspace choice to None.""" lookups = super().lookups(request, model_admin) return [(None if name == request.user.dataspace.name else pk, name) for pk, name in lookups] def queryset(self, request, queryset): if self.value(): return queryset.scope_by_id(self.value()) return queryset.scope(request.user.dataspace) class MissingInFilter(BaseDataspaceLookupsFilter): """ Filter by objects missing in the given dataspace, compared with the current `DataspaceFilter.parameter_name` or user dataspace. Both values for reference and target Dataspace are validated against the self.lookup_choices to make sure the user has the proper access permissions. This filter is only available to superusers, this is enforced in DataspacedAdmin.get_list_filter() """ title = _("missing in") parameter_name = "missing_in" def queryset(self, request, queryset): if not self.value(): return valid_choices = [str(choice) for choice, _ in self.lookup_choices] if str(self.value()) not in valid_choices: raise IncorrectLookupParameters()
return queryset.exclude(uuid__in=get_uuids_list_sorted(self.value(), queryset.model))
6
2023-12-07 16:57:42+00:00
8k
kylemcdonald/i2i-realtime
solo_app.py
[ { "identifier": "ThreadedWorker", "path": "threaded_worker.py", "snippet": "class ThreadedWorker:\n def __init__(self, has_input=True, has_output=True, mode=\"thread\", debug=False):\n if mode == \"thread\":\n self.ParallelClass = threading.Thread\n self.QueueClass = queue.Queue\n elif mode == \"process\":\n self.ParallelClass = multiprocessing.Process\n self.QueueClass = multiprocessing.Queue\n if has_input:\n self.input_queue = self.QueueClass()\n if has_output:\n self.output_queue = self.QueueClass()\n self.should_exit = False\n self.parallel = self.ParallelClass(target=self.run)\n self.name = self.__class__.__name__\n \n self.debug = debug\n self.last_print = time.time()\n self.print_interval = 1\n self.durations = []\n\n def set_name(self, name):\n self.name = name\n return self\n\n def feed(self, feeder):\n print(self.name, \"feeding with\", feeder.name)\n self.input_queue = feeder.output_queue\n return self\n\n def start(self):\n if self.parallel.is_alive():\n return self\n print(self.name, \"starting\")\n self.parallel.start()\n return self\n\n # called after the parallel is started\n def setup(self):\n pass\n \n def clear_input(self):\n with self.input_queue.mutex:\n self.input_queue.queue.clear()\n\n # called before the parallel is joined\n def cleanup(self):\n pass\n\n def run(self):\n print(self.name, \"running\")\n self.setup()\n try:\n while not self.should_exit:\n \n cur_time = time.time()\n if hasattr(self, \"input_queue\"):\n try:\n input = self.input_queue.get(timeout=0.1)\n except queue.Empty:\n continue\n if input is None:\n break\n start_time = time.time()\n result = self.work(input)\n else:\n start_time = time.time()\n result = self.work()\n duration = time.time() - start_time\n \n if result is not None and hasattr(self, \"output_queue\"):\n self.output_queue.put(result)\n \n self.durations.append(duration)\n if len(self.durations) > 10:\n self.durations.pop(0)\n \n time_since_print = cur_time - self.last_print\n if self.debug and time_since_print > self.print_interval:\n duration = sum(self.durations) / len(self.durations)\n print(self.name, f\"{duration*1000:.2f}ms\", flush=True)\n self.last_print = cur_time\n \n except KeyboardInterrupt:\n print(self.name, \"interrupted\")\n self.cleanup()\n\n def close(self):\n print(self.name, \"closing\")\n self.should_exit = True\n if hasattr(self, \"input_queue\"):\n self.input_queue.put(None)\n if self.parallel.is_alive():\n self.parallel.join()" }, { "identifier": "DiffusionProcessor", "path": "diffusion_processor.py", "snippet": "class DiffusionProcessor:\n def __init__(self, warmup=None, local_files_only=True):\n base_model = \"stabilityai/sdxl-turbo\"\n vae_model = \"madebyollin/taesdxl\"\n\n warnings.filterwarnings(\"ignore\", category=torch.jit.TracerWarning)\n\n disable_progress_bar()\n self.pipe = AutoPipelineForImage2Image.from_pretrained(\n base_model,\n torch_dtype=torch.float16,\n variant=\"fp16\",\n local_files_only=local_files_only,\n )\n\n self.pipe.vae = AutoencoderTiny.from_pretrained(\n vae_model, torch_dtype=torch.float16, local_files_only=local_files_only\n )\n fix_seed(self.pipe)\n\n print(\"Model loaded\")\n\n config = CompilationConfig.Default()\n config.enable_xformers = True\n config.enable_triton = True\n config.enable_cuda_graph = True\n self.pipe = compile(self.pipe, config=config)\n\n print(\"Model compiled\")\n\n self.pipe.to(device=\"cuda\", dtype=torch.float16)\n self.pipe.set_progress_bar_config(disable=True)\n\n print(\"Model moved to GPU\", flush=True)\n \n self.compel = Compel(\n 
tokenizer=[self.pipe.tokenizer, self.pipe.tokenizer_2],\n text_encoder=[self.pipe.text_encoder, self.pipe.text_encoder_2],\n returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED,\n requires_pooled=[False, True],\n )\n self.prompt_cache = FixedSizeDict(32)\n print(\"Prepared compel\")\n\n self.generator = torch.manual_seed(0)\n \n if warmup:\n warmup_shape = [int(e) for e in warmup.split(\"x\")]\n images = np.zeros(warmup_shape, dtype=np.float32)\n for i in range(2):\n print(f\"Warmup {warmup} {i+1}/2\")\n start_time = time.time()\n self.run(\n images,\n prompt=\"warmup\",\n num_inference_steps=2,\n strength=1.0\n )\n print(\"Warmup finished\", flush=True)\n \n def embed_prompt(self, prompt):\n if prompt not in self.prompt_cache:\n with torch.no_grad():\n print(\"embedding prompt\", prompt)\n self.prompt_cache[prompt] = self.compel(prompt)\n return self.prompt_cache[prompt]\n \n def meta_embed_prompt(self, prompt):\n pattern = r'\\(\"(.*?)\"\\s*,\\s*\"(.*?)\"\\)\\.blend\\((.*?),(.*?)\\)'\n match = re.search(pattern, prompt)\n if not match:\n return self.embed_prompt(prompt)\n str1, str2, t1, t2 = match.groups()\n t1 = float(t1)\n t2 = float(t2)\n cond1, pool1 = self.embed_prompt(str1)\n cond2, pool2 = self.embed_prompt(str2)\n cond = cond1 * t1 + cond2 * t2\n pool = pool1 * t1 + pool2 * t2\n return cond, pool\n \n def run(self, images, prompt, num_inference_steps, strength, use_compel=False, seed=None):\n strength = min(max(1 / num_inference_steps, strength), 1)\n if seed is not None:\n self.generator = torch.manual_seed(seed)\n kwargs = {}\n if use_compel:\n conditioning, pooled = self.meta_embed_prompt(prompt)\n batch_size = len(images)\n conditioning_batch = conditioning.expand(batch_size, -1, -1)\n pooled_batch = pooled.expand(batch_size, -1)\n kwargs[\"prompt_embeds\"] = conditioning_batch\n kwargs[\"pooled_prompt_embeds\"] = pooled_batch\n else:\n kwargs[\"prompt\"] = [prompt] * len(images)\n return self.pipe(\n image=images,\n generator=self.generator,\n num_inference_steps=num_inference_steps,\n guidance_scale=0,\n strength=strength,\n output_type=\"np\",\n **kwargs\n ).images" }, { "identifier": "Settings", "path": "settings.py", "snippet": "class Settings(BaseSettings):\n # config, cannot be changed\n mode: str = Field(default=\"video\")\n worker_id: int = Field(default=0)\n \n output_fast: bool = Field(default=True)\n zmq_video_port: int = Field(default=5554)\n job_start_port: int = Field(default=5555)\n settings_port: int = Field(default=5556)\n job_finish_port: int = Field(default=5557)\n output_port: int = Field(default=5558)\n osc_port: int = Field(default=8000)\n primary_hostname: str = Field(default='localhost')\n \n translation: bool = Field(default=False)\n safety: bool = Field(default=False)\n local_files_only: bool = Field(default=False)\n warmup: str = Field(default=None)\n threaded: bool = Field(default=False)\n \n # parameters for inference\n prompt: str = Field(default='A psychedelic landscape.')\n num_inference_steps: int = Field(default=2)\n fixed_seed: bool = Field(default=True)\n seed: int = Field(default=0)\n batch_size: int = Field(default=4)\n strength: float = Field(default=0.7)\n passthrough: bool = Field(default=False)\n compel: bool = Field(default=True)\n \n # can be changed dynamically\n opacity: float = Field(default=1.0)\n mirror: bool = Field(default=False)\n debug: bool = Field(default=False)\n pad: bool = Field(default=False)\n fps: int = Field(default=30)\n directory: str = Field(default='data/frames')\n \n 
class Config:\n env_file = \".env\"\n env_file_encoding = 'utf-8'" }, { "identifier": "SettingsAPI", "path": "settings_api.py", "snippet": "class SettingsAPI:\n def __init__(self, settings):\n self.shutdown = False\n self.settings = settings\n port = settings.settings_port\n self.thread = threading.Thread(target=self.run, args=(port,))\n \n def start(self):\n if not self.thread.is_alive():\n self.thread.start()\n\n def run(self, port):\n if self.settings.translation:\n translate = Translate()\n if self.settings.safety:\n safety_checker = SafetyChecker()\n\n app = FastAPI()\n\n # Add CORS middleware\n app.add_middleware(\n CORSMiddleware,\n allow_origins=[\"*\"],\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n )\n\n @app.get(\"/prompt/{msg}\")\n async def prompt(msg: str):\n if self.settings.translation:\n prompt = translate.translate_to_en(msg)\n if prompt != msg:\n print(\"Translating from:\", msg)\n else:\n prompt = msg\n \n override = \"-f\" in prompt\n if override:\n prompt = prompt.replace(\"-f\", \"\").strip()\n if self.settings.safety and not override:\n safety = safety_checker(prompt)\n if safety != \"safe\":\n print(f\"Ignoring prompt ({safety}):\", prompt)\n return {\"safety\": \"unsafe\"}\n \n self.settings.prompt = prompt\n print(\"Updated prompt:\", prompt)\n return {\"safety\": \"safe\"}\n\n @app.get(\"/directory/{status}\")\n async def directory(status: str):\n self.settings.directory = \"data/\" + status\n print(\"Updated directory status:\", self.settings.directory)\n return {\"status\": \"updated\"}\n \n @app.get(\"/debug/{status}\")\n async def debug(status: bool):\n self.settings.debug = status\n print(\"Updated debug status:\", status)\n return {\"status\": \"updated\"}\n \n @app.get(\"/compel/{status}\")\n async def compel(status: bool):\n self.settings.compel = status\n print(\"Updated compel status:\", status)\n return {\"status\": \"updated\"}\n \n @app.get(\"/passthrough/{status}\")\n async def passthrough(status: bool):\n self.settings.passthrough = status\n print(\"Updated passthrough status:\", self.settings.passthrough)\n return {\"status\": \"updated\"}\n\n @app.get(\"/fixed_seed/{status}\")\n async def fixed_seed(status: bool):\n self.settings.fixed_seed = status\n print(\"Updated fixed_seed status:\", self.settings.fixed_seed)\n return {\"status\": \"updated\"}\n \n @app.get(\"/mirror/{status}\")\n async def mirror(status: bool):\n self.settings.mirror = status\n print(\"Updated mirror status:\", status)\n return {\"status\": \"updated\"}\n\n @app.get(\"/batch_size/{value}\")\n async def batch_size(value: int):\n self.settings.batch_size = value\n print(\"Updated batch_size:\", self.settings.batch_size)\n return {\"status\": \"updated\"}\n\n @app.get(\"/seed/{value}\")\n async def seed(value: int):\n self.settings.seed = value\n print(\"Updated seed:\", self.settings.seed)\n return {\"status\": \"updated\"}\n\n @app.get(\"/steps/{value}\")\n async def steps(value: int):\n self.settings.num_inference_steps = value\n print(\"Updated num_inference_steps:\", self.settings.num_inference_steps)\n return {\"status\": \"updated\"}\n\n @app.get(\"/strength/{value}\")\n async def strength(value: float):\n self.settings.strength = value\n print(\"Updated strength:\", self.settings.strength)\n return {\"status\": \"updated\"}\n \n @app.get(\"/opacity/{value}\")\n async def opacity(value: float):\n value = min(max(value, 0), 1)\n self.settings.opacity = value\n print(\"Updated opacity:\", self.settings.opacity)\n return {\"status\": \"updated\"}\n\n config = 
uvicorn.Config(app, host=\"0.0.0.0\", port=port, log_level=\"info\")\n self.server = uvicorn.Server(config=config)\n try:\n self.server.run()\n except KeyboardInterrupt:\n pass\n\n def close(self):\n print(\"SettingsAPI closing\")\n if hasattr(self, \"server\"):\n self.server.should_exit = True\n self.thread.join()" }, { "identifier": "OscSettingsController", "path": "osc_settings_controller.py", "snippet": "class OscSettingsController(ThreadedWorker):\n def __init__(self, settings):\n super().__init__(has_input=False, has_output=False)\n address = f\"0.0.0.0:{settings.osc_port}\"\n print(self.name, f\"connecting to OSC on {address}\")\n self.osc = OscSocket(\"0.0.0.0\", settings.osc_port)\n self.settings = settings\n self.prompt_0 = \"\"\n self.prompt_1 = \"\"\n self.blend = 0.5\n \n def update_blend(self):\n if self.blend == 0:\n self.settings.prompt = self.prompt_0\n elif self.blend == 1:\n self.settings.prompt = self.prompt_1\n else:\n a = self.prompt_0\n b = self.prompt_1\n t = self.blend\n self.settings.prompt = f'(\"{a}\", \"{b}\").blend({1-t:.2f}, {t:.2f})'\n \n def work(self):\n try:\n msg = self.osc.recv()\n if msg is None:\n return\n if msg.address == \"/prompt\":\n prompt = ' '.join(msg.params)\n # print(\"OSC prompt:\", prompt)\n self.settings.prompt = prompt\n \n elif msg.address == \"/blend\":\n a, b, t = msg.params\n self.prompt_0 = a\n self.prompt_1 = b\n self.blend = t\n self.update_blend()\n elif msg.address == \"/prompt/0\":\n self.prompt_0 = ' '.join(msg.params)\n self.update_blend()\n elif msg.address == \"/prompt/1\":\n self.prompt_1 = ' '.join(msg.params)\n self.update_blend()\n elif msg.address == \"/blend_t\":\n self.blend = float(msg.params[0])\n self.update_blend()\n \n elif msg.address == \"/seed\":\n seed = msg.params[0]\n # print(\"OSC seed:\", seed)\n self.settings.seed = seed\n elif msg.address == \"/opacity\":\n opacity = float(msg.params[0])\n opacity = min(max(opacity, 0), 1)\n self.settings.opacity = opacity\n elif msg.address == \"/mode\":\n mode = msg.params[0]\n if mode == \"soft\":\n self.settings.num_inference_steps = 3\n self.settings.strength = 0.5\n elif mode == \"hard\":\n self.settings.num_inference_steps = 2\n self.settings.strength = 0.7 \n # else:\n # print(\"unknown osc\", msg.address, msg.params)\n except TypeError:\n print(\"osc TypeError\")\n except osc_packet.ParseError:\n print(\"osc ParseError\")\n except Exception as e:\n print(\"osc error\", e)\n \n def cleanup(self):\n self.osc.close()" } ]
import time import zmq import sdl2 import sdl2.ext import numpy as np import ctypes import numpy as np import torch import torch.nn.functional as F from threaded_worker import ThreadedWorker from diffusion_processor import DiffusionProcessor from settings import Settings from settings_api import SettingsAPI from osc_settings_controller import OscSettingsController
5707
if len(msg) == 8294400: img = torch.from_numpy(unpack_rgb444_image(msg, (1080, 1920))) elif len(msg) == 4147200: img = torch.frombuffer(msg, dtype=torch.uint8).view(1080, 1920, 2) else: print(f"Unknown image size {len(msg)}") return # self.batch.append(img) # on CPU from here self.batch.append(img.to("cuda")) # on GPU from here self.settings_batch.append(settings.copy()) n = self.batch_size if len(self.batch) >= n: batch = torch.stack(self.batch[:n]) # save the first n elements if batch.shape[1] == 3: batch = half_size_batch(batch) elif batch.shape[-1] == 2: batch = uyvy_to_rgb_batch(batch) else: print("unknown channels") settings_batch = self.settings_batch[:n] self.batch = self.batch[n:] # drop the first n elements self.settings_batch = self.settings_batch[n:] return batch, settings_batch def cleanup(self): self.sock.close() self.context.term() class Processor(ThreadedWorker): def __init__(self, settings): super().__init__(has_input=True, has_output=True, debug=True) self.batch_size = settings.batch_size self.settings = settings def setup(self): warmup = f"{self.batch_size}x540x960x3" self.diffusion_processor = DiffusionProcessor(warmup=warmup) self.clear_input() # drop old frames def work(self, args): images, settings_batch = args # cuda_images = torch.FloatTensor(np.array(images)).to("cuda") results = self.diffusion_processor.run( images=images, prompt=self.settings.prompt, use_compel=True, num_inference_steps=2, strength=0.7, seed=self.settings.seed) for frame_settings, image, result in zip(settings_batch, images, results): if frame_settings.opacity == 1: self.output_queue.put(result) else: opacity = float(frame_settings.opacity) input_image = np.transpose(image.cpu().numpy(), (1, 2, 0))[:result.shape[0]] blended = result * opacity + input_image * (1 - opacity) self.output_queue.put(blended) class Display(ThreadedWorker): def __init__(self, batch_size): super().__init__(has_input=True, has_output=False) self.fullscreen = True self.batch_size = batch_size self.width = 960 self.height = 536 self.channels = 3 self.frame_repeat = 2 def setup(self): sdl2.ext.init() self.window = sdl2.ext.Window("i2i", size=(self.width, self.height)) self.renderer = sdl2.ext.Renderer(self.window, flags=sdl2.SDL_RENDERER_ACCELERATED | sdl2.SDL_RENDERER_PRESENTVSYNC) self.window.show() self.event = sdl2.SDL_Event() self.texture = sdl2.SDL_CreateTexture(self.renderer.sdlrenderer, sdl2.SDL_PIXELFORMAT_RGB24, sdl2.SDL_TEXTUREACCESS_STREAMING, self.width, self.height) if self.fullscreen: sdl2.SDL_SetWindowFullscreen(self.window.window, sdl2.SDL_WINDOW_FULLSCREEN_DESKTOP) self.clear_input() # drop old frames def work(self, frame): while self.input_queue.qsize() > self.batch_size: # print("dropping frame") frame = self.input_queue.get() # Event handling while sdl2.SDL_PollEvent(ctypes.byref(self.event)): if self.event.type == sdl2.SDL_QUIT: self.should_exit = True elif self.event.type == sdl2.SDL_KEYDOWN: keysym = self.event.key.keysym.sym if keysym == sdl2.SDLK_f: self.fullscreen = not self.fullscreen mode = sdl2.SDL_WINDOW_FULLSCREEN_DESKTOP if self.fullscreen else 0 sdl2.SDL_SetWindowFullscreen(self.window.window, mode) # Update texture image_data = (frame * 255).astype(np.uint8) sdl2.SDL_UpdateTexture(self.texture, None, image_data.ctypes.data, self.width * self.channels) # Render noise on screen sdl2.SDL_RenderClear(self.renderer.sdlrenderer) for i in range(self.frame_repeat): sdl2.SDL_RenderCopy(self.renderer.sdlrenderer, self.texture, None, None) sdl2.SDL_RenderPresent(self.renderer.sdlrenderer) # Renderer 
will now wait for vsync def cleanup(self): sdl2.SDL_DestroyTexture(self.texture) sdl2.ext.quit() settings = Settings()
def unpack_rgb444_image(buffer, image_shape): mask = (2<<10) - 1 img = np.frombuffer(buffer, dtype=np.uint32).reshape(*image_shape).byteswap() red = (img >> 20) & mask green = (img >> 10) & mask blue = (img) & mask unpacked_image = np.stack((red, green, blue)).astype(np.float32) / 1024. return unpacked_image def half_size_batch(batch): return F.interpolate(batch, scale_factor=0.5, mode='area') def uyvy_to_rgb_batch(uyvy_images): # Convert the batch of images to float32 uyvy_f32 = uyvy_images.to(torch.float32) # Handle the Y channel y_channel = uyvy_f32[:, :, :, 1].unsqueeze(1) # Keep the Y channel in its own dimension y_channel = F.interpolate(y_channel, scale_factor=0.5, mode='area') # Handle the U channel u_channel = uyvy_f32[:, :, 0::2, 0].unsqueeze(1) h, w = y_channel.shape[-2], y_channel.shape[-1] # Extract the new dimensions after Y interpolation u_channel = F.interpolate(u_channel, size=(h,w), mode='area') # Handle the V channel v_channel = uyvy_f32[:, :, 1::2, 0].unsqueeze(1) v_channel = F.interpolate(v_channel, size=(h,w), mode='area') # Normalize channels to [0,1] range y_channel /= 255.0 u_channel /= 255.0 v_channel /= 255.0 # Recalculate R, G, B based on Y, U, V r = y_channel + 1.402 * (v_channel - 0.5) g = y_channel - 0.344136 * (u_channel - 0.5) - 0.714136 * (v_channel - 0.5) b = y_channel + 1.772 * (u_channel - 0.5) # Stack the channels and clamp the values rgb_images = torch.cat((r, g, b), dim=1) # Concatenate along the color channel dimension rgb_images = torch.clamp(rgb_images, 0.0, 1.0) return rgb_images class Receiver(ThreadedWorker): def __init__(self, batch_size): super().__init__(has_input=False, has_output=True) self.batch_size = batch_size def setup(self): self.context = zmq.Context() self.sock = self.context.socket(zmq.SUB) address = f"ipc:///tmp/zmq" print(f"Connecting to {address}") self.sock.connect(address) self.sock.setsockopt(zmq.SUBSCRIBE, b"") self.sock.setsockopt(zmq.RCVTIMEO, 100) self.sock.setsockopt(zmq.RCVHWM, 1) self.sock.setsockopt(zmq.LINGER, 0) self.batch = [] self.settings_batch = [] def work(self): try: msg = self.sock.recv(copy=False).bytes except zmq.Again: return if len(msg) == 8294400: img = torch.from_numpy(unpack_rgb444_image(msg, (1080, 1920))) elif len(msg) == 4147200: img = torch.frombuffer(msg, dtype=torch.uint8).view(1080, 1920, 2) else: print(f"Unknown image size {len(msg)}") return # self.batch.append(img) # on CPU from here self.batch.append(img.to("cuda")) # on GPU from here self.settings_batch.append(settings.copy()) n = self.batch_size if len(self.batch) >= n: batch = torch.stack(self.batch[:n]) # save the first n elements if batch.shape[1] == 3: batch = half_size_batch(batch) elif batch.shape[-1] == 2: batch = uyvy_to_rgb_batch(batch) else: print("unknown channels") settings_batch = self.settings_batch[:n] self.batch = self.batch[n:] # drop the first n elements self.settings_batch = self.settings_batch[n:] return batch, settings_batch def cleanup(self): self.sock.close() self.context.term() class Processor(ThreadedWorker): def __init__(self, settings): super().__init__(has_input=True, has_output=True, debug=True) self.batch_size = settings.batch_size self.settings = settings def setup(self): warmup = f"{self.batch_size}x540x960x3" self.diffusion_processor = DiffusionProcessor(warmup=warmup) self.clear_input() # drop old frames def work(self, args): images, settings_batch = args # cuda_images = torch.FloatTensor(np.array(images)).to("cuda") results = self.diffusion_processor.run( images=images, prompt=self.settings.prompt, 
use_compel=True, num_inference_steps=2, strength=0.7, seed=self.settings.seed) for frame_settings, image, result in zip(settings_batch, images, results): if frame_settings.opacity == 1: self.output_queue.put(result) else: opacity = float(frame_settings.opacity) input_image = np.transpose(image.cpu().numpy(), (1, 2, 0))[:result.shape[0]] blended = result * opacity + input_image * (1 - opacity) self.output_queue.put(blended) class Display(ThreadedWorker): def __init__(self, batch_size): super().__init__(has_input=True, has_output=False) self.fullscreen = True self.batch_size = batch_size self.width = 960 self.height = 536 self.channels = 3 self.frame_repeat = 2 def setup(self): sdl2.ext.init() self.window = sdl2.ext.Window("i2i", size=(self.width, self.height)) self.renderer = sdl2.ext.Renderer(self.window, flags=sdl2.SDL_RENDERER_ACCELERATED | sdl2.SDL_RENDERER_PRESENTVSYNC) self.window.show() self.event = sdl2.SDL_Event() self.texture = sdl2.SDL_CreateTexture(self.renderer.sdlrenderer, sdl2.SDL_PIXELFORMAT_RGB24, sdl2.SDL_TEXTUREACCESS_STREAMING, self.width, self.height) if self.fullscreen: sdl2.SDL_SetWindowFullscreen(self.window.window, sdl2.SDL_WINDOW_FULLSCREEN_DESKTOP) self.clear_input() # drop old frames def work(self, frame): while self.input_queue.qsize() > self.batch_size: # print("dropping frame") frame = self.input_queue.get() # Event handling while sdl2.SDL_PollEvent(ctypes.byref(self.event)): if self.event.type == sdl2.SDL_QUIT: self.should_exit = True elif self.event.type == sdl2.SDL_KEYDOWN: keysym = self.event.key.keysym.sym if keysym == sdl2.SDLK_f: self.fullscreen = not self.fullscreen mode = sdl2.SDL_WINDOW_FULLSCREEN_DESKTOP if self.fullscreen else 0 sdl2.SDL_SetWindowFullscreen(self.window.window, mode) # Update texture image_data = (frame * 255).astype(np.uint8) sdl2.SDL_UpdateTexture(self.texture, None, image_data.ctypes.data, self.width * self.channels) # Render noise on screen sdl2.SDL_RenderClear(self.renderer.sdlrenderer) for i in range(self.frame_repeat): sdl2.SDL_RenderCopy(self.renderer.sdlrenderer, self.texture, None, None) sdl2.SDL_RenderPresent(self.renderer.sdlrenderer) # Renderer will now wait for vsync def cleanup(self): sdl2.SDL_DestroyTexture(self.texture) sdl2.ext.quit() settings = Settings()
settings_api = SettingsAPI(settings)
3
2023-12-05 12:32:28+00:00
8k
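The record above builds a realtime image-to-image pipeline whose central per-frame step is decoding packed UYVY (4:2:2) video into RGB tensors on the GPU before diffusion. Below is a minimal single-frame sketch of that conversion, assuming only PyTorch; the function name and the nearest-neighbour chroma upsampling are illustrative choices, not the repository's exact implementation (which works on whole batches and uses area interpolation).

import torch
import torch.nn.functional as F

def uyvy_to_rgb(uyvy: torch.Tensor) -> torch.Tensor:
    # uyvy: (H, W, 2) uint8; byte layout per pixel is [chroma, luma], and the
    # chroma byte alternates U (even columns) / V (odd columns), so U and V
    # have half the horizontal resolution of Y.
    f = uyvy.to(torch.float32) / 255.0
    y = f[..., 1]                                # (H, W) full-resolution luma
    u = f[:, 0::2, 0]                            # (H, W/2) chroma U
    v = f[:, 1::2, 0]                            # (H, W/2) chroma V
    # Upsample chroma back to full width so all three planes align.
    u = F.interpolate(u[None, None], size=y.shape, mode="nearest")[0, 0]
    v = F.interpolate(v[None, None], size=y.shape, mode="nearest")[0, 0]
    # Full-range BT.601-style conversion, same coefficients as in the record.
    r = y + 1.402 * (v - 0.5)
    g = y - 0.344136 * (u - 0.5) - 0.714136 * (v - 0.5)
    b = y + 1.772 * (u - 0.5)
    return torch.clamp(torch.stack((r, g, b), dim=-1), 0.0, 1.0)

# A uniform mid-grey frame (Y = U = V = 128) should decode to roughly grey RGB.
frame = torch.full((4, 4, 2), 128, dtype=torch.uint8)
print(uyvy_to_rgb(frame).shape)  # torch.Size([4, 4, 3])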
wusize/CLIM
src/open_clip/eva_clip/eva_vit_model.py
[ { "identifier": "PatchDropout", "path": "src/open_clip/eva_clip/transformer.py", "snippet": "class PatchDropout(nn.Module):\n \"\"\"\n https://arxiv.org/abs/2212.00794\n \"\"\"\n\n def __init__(self, prob, exclude_first_token=True):\n super().__init__()\n assert 0 <= prob < 1.\n self.prob = prob\n self.exclude_first_token = exclude_first_token # exclude CLS token\n logging.info(f\"os.getenv('RoPE')={os.getenv('RoPE')}\")\n\n def forward(self, x):\n if not self.training or self.prob == 0.:\n return x\n\n if self.exclude_first_token:\n cls_tokens, x = x[:, :1], x[:, 1:]\n else:\n cls_tokens = torch.jit.annotate(torch.Tensor, x[:, :1])\n\n batch = x.size()[0]\n num_tokens = x.size()[1]\n\n batch_indices = torch.arange(batch)\n batch_indices = batch_indices[..., None]\n\n keep_prob = 1 - self.prob\n num_patches_keep = max(1, int(num_tokens * keep_prob))\n\n rand = torch.randn(batch, num_tokens)\n patch_indices_keep = rand.topk(num_patches_keep, dim=-1).indices\n\n x = x[batch_indices, patch_indices_keep]\n\n if self.exclude_first_token:\n x = torch.cat((cls_tokens, x), dim=1)\n\n if self.training and os.getenv('RoPE') == '1':\n return x, patch_indices_keep\n\n return x" }, { "identifier": "VisionRotaryEmbedding", "path": "src/open_clip/eva_clip/rope.py", "snippet": "class VisionRotaryEmbedding(nn.Module):\n def __init__(\n self,\n dim,\n pt_seq_len,\n ft_seq_len=None,\n custom_freqs = None,\n freqs_for = 'lang',\n theta = 10000,\n max_freq = 10,\n num_freqs = 1,\n ):\n super().__init__()\n self.ft_seq_len = ft_seq_len\n if custom_freqs:\n freqs = custom_freqs\n elif freqs_for == 'lang':\n freqs = 1. / (theta ** (torch.arange(0, dim, 2)[:(dim // 2)].float() / dim))\n elif freqs_for == 'pixel':\n freqs = torch.linspace(1., max_freq / 2, dim // 2) * pi\n elif freqs_for == 'constant':\n freqs = torch.ones(num_freqs).float()\n else:\n raise ValueError(f'unknown modality {freqs_for}')\n\n if ft_seq_len is None: ft_seq_len = pt_seq_len\n t = torch.arange(ft_seq_len) / ft_seq_len * pt_seq_len\n\n freqs_h = torch.einsum('..., f -> ... f', t, freqs)\n freqs_h = repeat(freqs_h, '... n -> ... (n r)', r = 2)\n\n freqs_w = torch.einsum('..., f -> ... f', t, freqs)\n freqs_w = repeat(freqs_w, '... n -> ... 
(n r)', r = 2)\n\n freqs = broadcat((freqs_h[:, None, :], freqs_w[None, :, :]), dim = -1) \n\n self.register_buffer(\"freqs_cos\", freqs.cos())\n self.register_buffer(\"freqs_sin\", freqs.sin())\n\n logging.info(f'Shape of rope freq: {self.freqs_cos.shape}')\n\n def interpolate_freq(self, t_len, freq):\n if t_len == self.ft_seq_len ** 2:\n return freq\n tar_size = int(t_len ** 0.5)\n freq = freq.view(1, self.ft_seq_len, self.ft_seq_len, freq.shape[-1]).permute(0, 3, 1, 2)\n freq = F.interpolate(freq, (tar_size, tar_size), mode='bicubic',\n align_corners=False).view(-1, t_len).T\n\n return freq\n\n def forward(self, t, start_index = 0):\n rot_dim = self.freqs_cos.shape[-1]\n end_index = start_index + rot_dim\n assert rot_dim <= t.shape[-1], f'feature dimension {t.shape[-1]} is not of sufficient size to rotate in all the positions {rot_dim}'\n t_left, t, t_right = t[..., :start_index], t[..., start_index:end_index], t[..., end_index:]\n # t = (t * self.freqs_cos) + (rotate_half(t) * self.freqs_sin)\n\n t = (t * self.interpolate_freq(t.shape[2], self.freqs_cos)) \\\n + (rotate_half(t) * self.interpolate_freq(t.shape[2], self.freqs_sin))\n\n return torch.cat((t_left, t, t_right), dim = -1)" }, { "identifier": "VisionRotaryEmbeddingFast", "path": "src/open_clip/eva_clip/rope.py", "snippet": "class VisionRotaryEmbeddingFast(nn.Module):\n def __init__(\n self,\n dim,\n pt_seq_len,\n ft_seq_len=None,\n custom_freqs = None,\n freqs_for = 'lang',\n theta = 10000,\n max_freq = 10,\n num_freqs = 1,\n patch_dropout = 0.\n ):\n super().__init__()\n self.custom_freqs = custom_freqs\n self.pt_seq_len = pt_seq_len\n self.ft_seq_len = ft_seq_len\n self.freqs_for = freqs_for\n self.dim = dim\n self.theta = theta\n self.max_freq = max_freq\n self.num_freqs = num_freqs\n if custom_freqs:\n freqs = custom_freqs\n elif freqs_for == 'lang':\n freqs = 1. / (theta ** (torch.arange(0, dim, 2)[:(dim // 2)].float() / dim))\n elif freqs_for == 'pixel':\n freqs = torch.linspace(1., max_freq / 2, dim // 2) * pi\n elif freqs_for == 'constant':\n freqs = torch.ones(num_freqs).float()\n else:\n raise ValueError(f'unknown modality {freqs_for}')\n\n if ft_seq_len is None: ft_seq_len = pt_seq_len\n t = torch.arange(ft_seq_len) / ft_seq_len * pt_seq_len\n\n freqs = torch.einsum('..., f -> ... f', t, freqs)\n freqs = repeat(freqs, '... n -> ... 
(n r)', r = 2)\n freqs = broadcat((freqs[:, None, :], freqs[None, :, :]), dim = -1)\n\n freqs_cos = freqs.cos().view(-1, freqs.shape[-1])\n freqs_sin = freqs.sin().view(-1, freqs.shape[-1])\n\n self.patch_dropout = patch_dropout\n\n self.register_buffer(\"freqs_cos\", freqs_cos)\n self.register_buffer(\"freqs_sin\", freqs_sin)\n\n logging.info(f'Shape of rope freq: {self.freqs_cos.shape}')\n self.register_buffer(\"flag\", torch.tensor(0, dtype=torch.long),\n persistent=False)\n\n def forward(self, t, patch_indices_keep=None):\n if patch_indices_keep is not None:\n batch = t.size()[0]\n batch_indices = torch.arange(batch)\n batch_indices = batch_indices[..., None]\n\n freqs_cos = repeat(self.freqs_cos, 'i j -> n i m j', n=t.shape[0], m=t.shape[1])\n freqs_sin = repeat(self.freqs_sin, 'i j -> n i m j', n=t.shape[0], m=t.shape[1])\n\n freqs_cos = freqs_cos[batch_indices, patch_indices_keep]\n freqs_cos = rearrange(freqs_cos, 'n i m j -> n m i j')\n freqs_sin = freqs_sin[batch_indices, patch_indices_keep]\n freqs_sin = rearrange(freqs_sin, 'n i m j -> n m i j')\n\n return t * freqs_cos + rotate_half(t) * freqs_sin\n freqs_cos, freqs_sin = self.recalculate(t)\n return t * freqs_cos + rotate_half(t) * freqs_sin\n # return t * self.freqs_cos + rotate_half(t) * self.freqs_sin\n # return t * self.interpolate_freq(t.shape[2], self.freqs_cos) \\\n # + rotate_half(t) * self.interpolate_freq(t.shape[2], self.freqs_sin)\n\n def interpolate_freq(self, t_len, freq):\n if t_len == self.ft_seq_len ** 2:\n return freq\n tar_size = int(t_len ** 0.5)\n freq = freq.view(1, self.ft_seq_len, self.ft_seq_len, freq.shape[-1]).permute(0, 3, 1, 2)\n freq = F.interpolate(freq, (tar_size, tar_size), mode='bicubic',\n align_corners=False).view(-1, t_len).T\n\n return freq\n\n def recalculate(self, x):\n # TODO: fix it, do not calculate it every time\n x_len = x.shape[2]\n if x_len == self.ft_seq_len ** 2:\n return self.freqs_cos, self.freqs_sin\n elif hasattr(self, f\"freqs_cos_{x_len}\"):\n return getattr(self, f\"freqs_cos_{x_len}\"), getattr(self, f\"freqs_sin_{x_len}\")\n assert self.flag <= 4\n ft_seq_len = int(x_len ** 0.5)\n if self.custom_freqs:\n freqs = self.custom_freqs\n elif self.freqs_for == 'lang':\n freqs = 1. / (self.theta ** (torch.arange(0, self.dim, 2)[:(self.dim // 2)].float() / self.dim))\n elif self.freqs_for == 'pixel':\n freqs = torch.linspace(1., self.max_freq / 2, self.dim // 2) * pi\n elif self.freqs_for == 'constant':\n freqs = torch.ones(self.num_freqs).float()\n else:\n raise ValueError(f'unknown modality {self.freqs_for}')\n\n t = torch.arange(ft_seq_len) / ft_seq_len * self.pt_seq_len\n\n freqs = torch.einsum('..., f -> ... f', t, freqs)\n freqs = repeat(freqs, '... n -> ... (n r)', r = 2)\n freqs = broadcat((freqs[:, None, :], freqs[None, :, :]), dim = -1)\n\n freqs_cos = freqs.cos().view(-1, freqs.shape[-1]).to(x)\n freqs_sin = freqs.sin().view(-1, freqs.shape[-1]).to(x)\n # TODO this is just a workaround\n self.register_buffer(f\"freqs_cos_{x_len}\", freqs_cos, persistent=False)\n self.register_buffer(f\"freqs_sin_{x_len}\", freqs_sin, persistent=False)\n self.flag.data += 1\n logging.info(f'Add a new rope freq of shape: {freqs_cos.shape}')\n print(f'Add a new rope freq of shape: {freqs_cos.shape}', flush=True)\n\n return freqs_cos, freqs_sin" } ]
import math import os import torch import torch.nn as nn import torch.nn.functional as F import xformers.ops as xops from functools import partial from timm.models.layers import drop_path, to_2tuple, trunc_normal_ from timm.layers import drop_path, to_2tuple, trunc_normal_ from .transformer import PatchDropout from .rope import VisionRotaryEmbedding, VisionRotaryEmbeddingFast from torchvision.ops import roi_align from deepspeed.runtime.activation_checkpointing.checkpointing import checkpoint from torch.utils.checkpoint import checkpoint from torch.utils.checkpoint import checkpoint from typing import Sequence
6669
x = x + self.drop_path(self.gamma_2 * self.norm2(self.mlp(x))) else: x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias, attn_mask=attn_mask)) x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x))) return x def forward_without_attn(self, x): if self.gamma_1 is None: if self.postnorm: x = x + self.drop_path(self.norm1(self.attn.proj_without_attn(x))) x = x + self.drop_path(self.norm2(self.mlp(x))) else: x = x + self.drop_path(self.attn.proj_without_attn(self.norm1(x))) x = x + self.drop_path(self.mlp(self.norm2(x))) else: if self.postnorm: x = x + self.drop_path(self.gamma_1 * self.norm1(self.attn.proj_without_attn(x))) x = x + self.drop_path(self.gamma_2 * self.norm2(self.mlp(x))) else: x = x + self.drop_path(self.gamma_1 * self.attn.proj_without_attn(self.norm1(x))) x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x))) return x class PatchEmbed(nn.Module): """ Image to Patch Embedding """ def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768): super().__init__() img_size = to_2tuple(img_size) patch_size = to_2tuple(patch_size) num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0]) self.patch_shape = (img_size[0] // patch_size[0], img_size[1] // patch_size[1]) self.img_size = img_size self.patch_size = patch_size self.num_patches = num_patches self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) def forward(self, x, **kwargs): B, C, H, W = x.shape # FIXME look at relaxing size constraints # assert H == self.img_size[0] and W == self.img_size[1], \ # f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." x = self.proj(x).flatten(2).transpose(1, 2) return x class RelativePositionBias(nn.Module): def __init__(self, window_size, num_heads): super().__init__() self.window_size = window_size self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3 self.relative_position_bias_table = nn.Parameter( torch.zeros(self.num_relative_distance, num_heads)) # 2*Wh-1 * 2*Ww-1, nH # cls to token & token 2 cls & cls to cls # get pair-wise relative position index for each token inside the window coords_h = torch.arange(window_size[0]) coords_w = torch.arange(window_size[1]) coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0 relative_coords[:, :, 1] += window_size[1] - 1 relative_coords[:, :, 0] *= 2 * window_size[1] - 1 relative_position_index = \ torch.zeros(size=(window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype) relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww relative_position_index[0, 0:] = self.num_relative_distance - 3 relative_position_index[0:, 0] = self.num_relative_distance - 2 relative_position_index[0, 0] = self.num_relative_distance - 1 self.register_buffer("relative_position_index", relative_position_index) def forward(self): relative_position_bias = \ self.relative_position_bias_table[self.relative_position_index.view(-1)].view( self.window_size[0] * self.window_size[1] + 1, self.window_size[0] * self.window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH return relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww class 
EVAVisionTransformer(nn.Module): """ Vision Transformer with support for patch or hybrid CNN input stage """ def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=nn.LayerNorm, init_values=None, patch_dropout=0., use_abs_pos_emb=True, use_rel_pos_bias=False, use_shared_rel_pos_bias=False, rope=False, use_mean_pooling=True, init_scale=0.001, grad_checkpointing=False, xattn=False, postnorm=False, pt_hw_seq_len=16, intp_freq=False, naiveswiglu=False, subln=False): super().__init__() self.image_size = img_size self.num_heads = num_heads self.num_classes = num_classes self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models self.patch_embed = PatchEmbed( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) num_patches = self.patch_embed.num_patches self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) # self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if use_abs_pos_emb: self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) else: self.pos_embed = None self.pos_drop = nn.Dropout(p=drop_rate) if use_shared_rel_pos_bias: self.rel_pos_bias = RelativePositionBias(window_size=self.patch_embed.patch_shape, num_heads=num_heads) else: self.rel_pos_bias = None if rope: half_head_dim = embed_dim // num_heads // 2 hw_seq_len = img_size // patch_size
# -------------------------------------------------------- # Adapted from https://github.com/microsoft/unilm/tree/master/beit # -------------------------------------------------------- try: except: if os.getenv('ENV_TYPE') == 'deepspeed': try: except: else: try: except ImportError: xops = None print("Please 'pip install xformers'") class DropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). """ def __init__(self, drop_prob=None): super(DropPath, self).__init__() self.drop_prob = drop_prob def forward(self, x): return drop_path(x, self.drop_prob, self.training) def extra_repr(self) -> str: return 'p={}'.format(self.drop_prob) class Mlp(nn.Module): def __init__( self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, norm_layer=nn.LayerNorm, drop=0., subln=False, ): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.fc1 = nn.Linear(in_features, hidden_features) self.act = act_layer() self.ffn_ln = norm_layer(hidden_features) if subln else nn.Identity() self.fc2 = nn.Linear(hidden_features, out_features) self.drop = nn.Dropout(drop) def forward(self, x): x = self.fc1(x) x = self.act(x) # x = self.drop(x) # commit this for the orignal BERT implement x = self.ffn_ln(x) x = self.fc2(x) x = self.drop(x) return x class SwiGLU(nn.Module): def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.SiLU, drop=0., norm_layer=nn.LayerNorm, subln=False): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.w1 = nn.Linear(in_features, hidden_features) self.w2 = nn.Linear(in_features, hidden_features) self.act = act_layer() self.ffn_ln = norm_layer(hidden_features) if subln else nn.Identity() self.w3 = nn.Linear(hidden_features, out_features) self.drop = nn.Dropout(drop) def forward(self, x): x1 = self.w1(x) x2 = self.w2(x) hidden = self.act(x1) * x2 x = self.ffn_ln(hidden) x = self.w3(x) x = self.drop(x) return x class Attention(nn.Module): def __init__( self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., window_size=None, attn_head_dim=None, xattn=False, rope=None, subln=False, norm_layer=nn.LayerNorm): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads if attn_head_dim is not None: head_dim = attn_head_dim all_head_dim = head_dim * self.num_heads self.scale = qk_scale or head_dim ** -0.5 self.subln = subln if self.subln: self.q_proj = nn.Linear(dim, all_head_dim, bias=False) self.k_proj = nn.Linear(dim, all_head_dim, bias=False) self.v_proj = nn.Linear(dim, all_head_dim, bias=False) else: self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False) if qkv_bias: self.q_bias = nn.Parameter(torch.zeros(all_head_dim)) self.v_bias = nn.Parameter(torch.zeros(all_head_dim)) else: self.q_bias = None self.v_bias = None if window_size: self.window_size = window_size self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3 self.relative_position_bias_table = nn.Parameter( torch.zeros(self.num_relative_distance, num_heads)) # 2*Wh-1 * 2*Ww-1, nH # cls to token & token 2 cls & cls to cls # get pair-wise relative position index for each token inside the window coords_h = torch.arange(window_size[0]) coords_w = torch.arange(window_size[1]) coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww relative_coords = coords_flatten[:, :, None] - 
coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0 relative_coords[:, :, 1] += window_size[1] - 1 relative_coords[:, :, 0] *= 2 * window_size[1] - 1 relative_position_index = \ torch.zeros(size=(window_size[0] * window_size[1] + 1, ) * 2, dtype=relative_coords.dtype) relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww relative_position_index[0, 0:] = self.num_relative_distance - 3 relative_position_index[0:, 0] = self.num_relative_distance - 2 relative_position_index[0, 0] = self.num_relative_distance - 1 self.register_buffer("relative_position_index", relative_position_index) else: self.window_size = None self.relative_position_bias_table = None self.relative_position_index = None self.attn_drop = nn.Dropout(attn_drop) self.inner_attn_ln = norm_layer(all_head_dim) if subln else nn.Identity() # self.proj = nn.Linear(all_head_dim, all_head_dim) self.proj = nn.Linear(all_head_dim, dim) self.proj_drop = nn.Dropout(proj_drop) self.xattn = xattn self.xattn_drop = attn_drop self.rope = rope def forward(self, x, rel_pos_bias=None, attn_mask=None): B, N, C = x.shape if self.subln: q = F.linear(input=x, weight=self.q_proj.weight, bias=self.q_bias) k = F.linear(input=x, weight=self.k_proj.weight, bias=None) v = F.linear(input=x, weight=self.v_proj.weight, bias=self.v_bias) q = q.reshape(B, N, self.num_heads, -1).permute(0, 2, 1, 3) # B, num_heads, N, C k = k.reshape(B, N, self.num_heads, -1).permute(0, 2, 1, 3) v = v.reshape(B, N, self.num_heads, -1).permute(0, 2, 1, 3) else: qkv_bias = None if self.q_bias is not None: qkv_bias = torch.cat((self.q_bias, torch.zeros_like(self.v_bias, requires_grad=False), self.v_bias)) qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias) qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) # 3, B, num_heads, N, C q, k, v = qkv[0], qkv[1], qkv[2] if self.rope: if attn_mask is not None: attn_mask = attn_mask.to(q) # slightly fast impl q_t = q[:, :, 1:, :] ro_q_t = self.rope(q_t) q = torch.cat((q[:, :, :1, :], ro_q_t), -2).type_as(v) k_t = k[:, :, 1:, :] ro_k_t = self.rope(k_t) k = torch.cat((k[:, :, :1, :], ro_k_t), -2).type_as(v) if self.xattn: q = q.permute(0, 2, 1, 3) # B, num_heads, N, C -> B, N, num_heads, C k = k.permute(0, 2, 1, 3) v = v.permute(0, 2, 1, 3) x = xops.memory_efficient_attention( q, k, v, p=self.xattn_drop, scale=self.scale, attn_bias=attn_mask # to allow masked attention ) x = x.reshape(B, N, -1) x = self.inner_attn_ln(x) x = self.proj(x) x = self.proj_drop(x) else: q = q * self.scale attn = (q @ k.transpose(-2, -1)) if self.relative_position_bias_table is not None: relative_position_bias = \ self.relative_position_bias_table[self.relative_position_index.view(-1)].view( self.window_size[0] * self.window_size[1] + 1, self.window_size[0] * self.window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww attn = attn + relative_position_bias.unsqueeze(0).type_as(attn) if rel_pos_bias is not None: attn = attn + rel_pos_bias.type_as(attn) if attn_mask is not None: attn_mask = attn_mask.bool() attn = attn.masked_fill(~attn_mask[:, None, None, :], float("-inf")) attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B, N, -1) x = self.inner_attn_ln(x) x = self.proj(x) x = self.proj_drop(x) return x def proj_without_attn(self, x): x = 
F.linear(input=x, weight=self.v_proj.weight, bias=self.v_bias) # B, num_heads, C x = self.inner_attn_ln(x) x = self.proj(x) x = self.proj_drop(x) return x class Block(nn.Module): def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0., drop_path=0., init_values=None, act_layer=nn.GELU, norm_layer=nn.LayerNorm, window_size=None, attn_head_dim=None, xattn=False, rope=None, postnorm=False, subln=False, naiveswiglu=False): super().__init__() self.norm1 = norm_layer(dim) self.attn = Attention( dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop, window_size=window_size, attn_head_dim=attn_head_dim, xattn=xattn, rope=rope, subln=subln, norm_layer=norm_layer) # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) if naiveswiglu: self.mlp = SwiGLU( in_features=dim, hidden_features=mlp_hidden_dim, subln=subln, norm_layer=norm_layer, ) else: self.mlp = Mlp( in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, subln=subln, drop=drop ) if init_values is not None and init_values > 0: self.gamma_1 = nn.Parameter(init_values * torch.ones((dim)),requires_grad=True) self.gamma_2 = nn.Parameter(init_values * torch.ones((dim)),requires_grad=True) else: self.gamma_1, self.gamma_2 = None, None self.postnorm = postnorm def forward(self, x, rel_pos_bias=None, attn_mask=None): if self.gamma_1 is None: if self.postnorm: x = x + self.drop_path(self.norm1(self.attn(x, rel_pos_bias=rel_pos_bias, attn_mask=attn_mask))) x = x + self.drop_path(self.norm2(self.mlp(x))) else: x = x + self.drop_path(self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias, attn_mask=attn_mask)) x = x + self.drop_path(self.mlp(self.norm2(x))) else: if self.postnorm: x = x + self.drop_path(self.gamma_1 * self.norm1(self.attn(x, rel_pos_bias=rel_pos_bias, attn_mask=attn_mask))) x = x + self.drop_path(self.gamma_2 * self.norm2(self.mlp(x))) else: x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias, attn_mask=attn_mask)) x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x))) return x def forward_without_attn(self, x): if self.gamma_1 is None: if self.postnorm: x = x + self.drop_path(self.norm1(self.attn.proj_without_attn(x))) x = x + self.drop_path(self.norm2(self.mlp(x))) else: x = x + self.drop_path(self.attn.proj_without_attn(self.norm1(x))) x = x + self.drop_path(self.mlp(self.norm2(x))) else: if self.postnorm: x = x + self.drop_path(self.gamma_1 * self.norm1(self.attn.proj_without_attn(x))) x = x + self.drop_path(self.gamma_2 * self.norm2(self.mlp(x))) else: x = x + self.drop_path(self.gamma_1 * self.attn.proj_without_attn(self.norm1(x))) x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x))) return x class PatchEmbed(nn.Module): """ Image to Patch Embedding """ def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768): super().__init__() img_size = to_2tuple(img_size) patch_size = to_2tuple(patch_size) num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0]) self.patch_shape = (img_size[0] // patch_size[0], img_size[1] // patch_size[1]) self.img_size = img_size self.patch_size = patch_size self.num_patches = num_patches self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) def forward(self, x, **kwargs): B, C, H, W = x.shape # FIXME look at 
relaxing size constraints # assert H == self.img_size[0] and W == self.img_size[1], \ # f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." x = self.proj(x).flatten(2).transpose(1, 2) return x class RelativePositionBias(nn.Module): def __init__(self, window_size, num_heads): super().__init__() self.window_size = window_size self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3 self.relative_position_bias_table = nn.Parameter( torch.zeros(self.num_relative_distance, num_heads)) # 2*Wh-1 * 2*Ww-1, nH # cls to token & token 2 cls & cls to cls # get pair-wise relative position index for each token inside the window coords_h = torch.arange(window_size[0]) coords_w = torch.arange(window_size[1]) coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0 relative_coords[:, :, 1] += window_size[1] - 1 relative_coords[:, :, 0] *= 2 * window_size[1] - 1 relative_position_index = \ torch.zeros(size=(window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype) relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww relative_position_index[0, 0:] = self.num_relative_distance - 3 relative_position_index[0:, 0] = self.num_relative_distance - 2 relative_position_index[0, 0] = self.num_relative_distance - 1 self.register_buffer("relative_position_index", relative_position_index) def forward(self): relative_position_bias = \ self.relative_position_bias_table[self.relative_position_index.view(-1)].view( self.window_size[0] * self.window_size[1] + 1, self.window_size[0] * self.window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH return relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww class EVAVisionTransformer(nn.Module): """ Vision Transformer with support for patch or hybrid CNN input stage """ def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=nn.LayerNorm, init_values=None, patch_dropout=0., use_abs_pos_emb=True, use_rel_pos_bias=False, use_shared_rel_pos_bias=False, rope=False, use_mean_pooling=True, init_scale=0.001, grad_checkpointing=False, xattn=False, postnorm=False, pt_hw_seq_len=16, intp_freq=False, naiveswiglu=False, subln=False): super().__init__() self.image_size = img_size self.num_heads = num_heads self.num_classes = num_classes self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models self.patch_embed = PatchEmbed( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) num_patches = self.patch_embed.num_patches self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) # self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if use_abs_pos_emb: self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) else: self.pos_embed = None self.pos_drop = nn.Dropout(p=drop_rate) if use_shared_rel_pos_bias: self.rel_pos_bias = RelativePositionBias(window_size=self.patch_embed.patch_shape, num_heads=num_heads) else: self.rel_pos_bias = None if rope: half_head_dim = embed_dim // num_heads // 2 hw_seq_len = img_size // patch_size
self.rope = VisionRotaryEmbeddingFast(
2
2023-12-09 05:43:08+00:00
8k
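The record above is an EVA-style Vision Transformer whose attention rotates queries and keys with rotary position embeddings (RoPE) from precomputed cos/sin buffers, i.e. t * cos + rotate_half(t) * sin. The sketch below shows that application in the simplest 1D case, assuming only PyTorch; the toy shapes and sequence-only frequencies are illustrative, whereas VisionRotaryEmbeddingFast in the record builds 2D frequencies over the patch grid and caches a cos/sin table per resolution.

import torch

def rotate_half(x: torch.Tensor) -> torch.Tensor:
    # Rotate adjacent channel pairs on the last dim: (x1, x2) -> (-x2, x1).
    x = x.view(*x.shape[:-1], -1, 2)
    x1, x2 = x[..., 0], x[..., 1]
    return torch.stack((-x2, x1), dim=-1).flatten(-2)

def apply_rope(t, freqs_cos, freqs_sin):
    # Core RoPE identity used by the record's attention: rotate each pair of
    # channels by a position-dependent angle.
    return t * freqs_cos + rotate_half(t) * freqs_sin

# Toy setup: 8 tokens, head dimension 16 (hypothetical sizes).
seq_len, dim = 8, 16
inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2).float() / dim))        # (dim/2,)
angles = torch.einsum("i,j->ij", torch.arange(seq_len).float(), inv_freq)  # (seq, dim/2)
angles = angles.repeat_interleave(2, dim=-1)                               # (seq, dim), pairs share an angle
q = torch.randn(1, 1, seq_len, dim)                                        # (batch, heads, seq, dim)
q_rot = apply_rope(q, angles.cos(), angles.sin())
print(q_rot.shape)  # torch.Size([1, 1, 8, 16])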
moonshot-admin/moonshot
third-party/tqdm-4.66.1/tqdm/std.py
[ { "identifier": "TMonitor", "path": "third-party/tqdm-4.66.1/tqdm/_monitor.py", "snippet": "class TMonitor(Thread):\n \"\"\"\n Monitoring thread for tqdm bars.\n Monitors if tqdm bars are taking too much time to display\n and readjusts miniters automatically if necessary.\n\n Parameters\n ----------\n tqdm_cls : class\n tqdm class to use (can be core tqdm or a submodule).\n sleep_interval : float\n Time to sleep between monitoring checks.\n \"\"\"\n _test = {} # internal vars for unit testing\n\n def __init__(self, tqdm_cls, sleep_interval):\n Thread.__init__(self)\n self.daemon = True # kill thread when main killed (KeyboardInterrupt)\n self.woken = 0 # last time woken up, to sync with monitor\n self.tqdm_cls = tqdm_cls\n self.sleep_interval = sleep_interval\n self._time = self._test.get(\"time\", time)\n self.was_killed = self._test.get(\"Event\", Event)()\n atexit.register(self.exit)\n self.start()\n\n def exit(self):\n self.was_killed.set()\n if self is not current_thread():\n self.join()\n return self.report()\n\n def get_instances(self):\n # returns a copy of started `tqdm_cls` instances\n return [i for i in self.tqdm_cls._instances.copy()\n # Avoid race by checking that the instance started\n if hasattr(i, 'start_t')]\n\n def run(self):\n cur_t = self._time()\n while True:\n # After processing and before sleeping, notify that we woke\n # Need to be done just before sleeping\n self.woken = cur_t\n # Sleep some time...\n self.was_killed.wait(self.sleep_interval)\n # Quit if killed\n if self.was_killed.is_set():\n return\n # Then monitor!\n # Acquire lock (to access _instances)\n with self.tqdm_cls.get_lock():\n cur_t = self._time()\n # Check tqdm instances are waiting too long to print\n instances = self.get_instances()\n for instance in instances:\n # Check event in loop to reduce blocking time on exit\n if self.was_killed.is_set():\n return\n # Only if mininterval > 1 (else iterations are just slow)\n # and last refresh exceeded maxinterval\n if (\n instance.miniters > 1\n and (cur_t - instance.last_print_t) >= instance.maxinterval\n ):\n # force bypassing miniters on next iteration\n # (dynamic_miniters adjusts mininterval automatically)\n instance.miniters = 1\n # Refresh now! 
(works only for manual tqdm)\n instance.refresh(nolock=True)\n # Remove accidental long-lived strong reference\n del instance\n if instances != self.get_instances(): # pragma: nocover\n warn(\"Set changed size during iteration\" +\n \" (see https://github.com/tqdm/tqdm/issues/481)\",\n TqdmSynchronisationWarning, stacklevel=2)\n # Remove accidental long-lived strong references\n del instances\n\n def report(self):\n return not self.was_killed.is_set()" }, { "identifier": "CallbackIOWrapper", "path": "third-party/tqdm-4.66.1/tqdm/utils.py", "snippet": "class CallbackIOWrapper(ObjectWrapper):\n def __init__(self, callback, stream, method=\"read\"):\n \"\"\"\n Wrap a given `file`-like object's `read()` or `write()` to report\n lengths to the given `callback`\n \"\"\"\n super(CallbackIOWrapper, self).__init__(stream)\n func = getattr(stream, method)\n if method == \"write\":\n @wraps(func)\n def write(data, *args, **kwargs):\n res = func(data, *args, **kwargs)\n callback(len(data))\n return res\n self.wrapper_setattr('write', write)\n elif method == \"read\":\n @wraps(func)\n def read(*args, **kwargs):\n data = func(*args, **kwargs)\n callback(len(data))\n return data\n self.wrapper_setattr('read', read)\n else:\n raise KeyError(\"Can only wrap read/write methods\")" }, { "identifier": "Comparable", "path": "third-party/tqdm-4.66.1/tqdm/utils.py", "snippet": "class Comparable(object):\n \"\"\"Assumes child has self._comparable attr/@property\"\"\"\n def __lt__(self, other):\n return self._comparable < other._comparable\n\n def __le__(self, other):\n return (self < other) or (self == other)\n\n def __eq__(self, other):\n return self._comparable == other._comparable\n\n def __ne__(self, other):\n return not self == other\n\n def __gt__(self, other):\n return not self <= other\n\n def __ge__(self, other):\n return not self < other" }, { "identifier": "DisableOnWriteError", "path": "third-party/tqdm-4.66.1/tqdm/utils.py", "snippet": "class DisableOnWriteError(ObjectWrapper):\n \"\"\"\n Disable the given `tqdm_instance` upon `write()` or `flush()` errors.\n \"\"\"\n @staticmethod\n def disable_on_exception(tqdm_instance, func):\n \"\"\"\n Quietly set `tqdm_instance.miniters=inf` if `func` raises `errno=5`.\n \"\"\"\n tqdm_instance = proxy(tqdm_instance)\n\n def inner(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except OSError as e:\n if e.errno != 5:\n raise\n try:\n tqdm_instance.miniters = float('inf')\n except ReferenceError:\n pass\n except ValueError as e:\n if 'closed' not in str(e):\n raise\n try:\n tqdm_instance.miniters = float('inf')\n except ReferenceError:\n pass\n return inner\n\n def __init__(self, wrapped, tqdm_instance):\n super(DisableOnWriteError, self).__init__(wrapped)\n if hasattr(wrapped, 'write'):\n self.wrapper_setattr(\n 'write', self.disable_on_exception(tqdm_instance, wrapped.write))\n if hasattr(wrapped, 'flush'):\n self.wrapper_setattr(\n 'flush', self.disable_on_exception(tqdm_instance, wrapped.flush))\n\n def __eq__(self, other):\n return self._wrapped == getattr(other, '_wrapped', other)" }, { "identifier": "FormatReplace", "path": "third-party/tqdm-4.66.1/tqdm/utils.py", "snippet": "class FormatReplace(object):\n \"\"\"\n >>> a = FormatReplace('something')\n >>> \"{:5d}\".format(a)\n 'something'\n \"\"\" # NOQA: P102\n def __init__(self, replace=''):\n self.replace = replace\n self.format_called = 0\n\n def __format__(self, _):\n self.format_called += 1\n return self.replace" }, { "identifier": "SimpleTextIOWrapper", "path": 
"third-party/tqdm-4.66.1/tqdm/utils.py", "snippet": "class SimpleTextIOWrapper(ObjectWrapper):\n \"\"\"\n Change only `.write()` of the wrapped object by encoding the passed\n value and passing the result to the wrapped object's `.write()` method.\n \"\"\"\n # pylint: disable=too-few-public-methods\n def __init__(self, wrapped, encoding):\n super(SimpleTextIOWrapper, self).__init__(wrapped)\n self.wrapper_setattr('encoding', encoding)\n\n def write(self, s):\n \"\"\"\n Encode `s` and pass to the wrapped object's `.write()` method.\n \"\"\"\n return self._wrapped.write(s.encode(self.wrapper_getattr('encoding')))\n\n def __eq__(self, other):\n return self._wrapped == getattr(other, '_wrapped', other)" }, { "identifier": "_is_ascii", "path": "third-party/tqdm-4.66.1/tqdm/utils.py", "snippet": "def _is_ascii(s):\n if isinstance(s, str):\n for c in s:\n if ord(c) > 255:\n return False\n return True\n return _supports_unicode(s)" }, { "identifier": "_screen_shape_wrapper", "path": "third-party/tqdm-4.66.1/tqdm/utils.py", "snippet": "def _screen_shape_wrapper(): # pragma: no cover\n \"\"\"\n Return a function which returns console dimensions (width, height).\n Supported: linux, osx, windows, cygwin.\n \"\"\"\n _screen_shape = None\n if IS_WIN:\n _screen_shape = _screen_shape_windows\n if _screen_shape is None:\n _screen_shape = _screen_shape_tput\n if IS_NIX:\n _screen_shape = _screen_shape_linux\n return _screen_shape" }, { "identifier": "_supports_unicode", "path": "third-party/tqdm-4.66.1/tqdm/utils.py", "snippet": "def _supports_unicode(fp):\n try:\n return _is_utf(fp.encoding)\n except AttributeError:\n return False" }, { "identifier": "_term_move_up", "path": "third-party/tqdm-4.66.1/tqdm/utils.py", "snippet": "def _term_move_up(): # pragma: no cover\n return '' if (os.name == 'nt') and (colorama is None) else '\\x1b[A'" }, { "identifier": "disp_len", "path": "third-party/tqdm-4.66.1/tqdm/utils.py", "snippet": "def disp_len(data):\n \"\"\"\n Returns the real on-screen length of a string which may contain\n ANSI control codes and wide chars.\n \"\"\"\n return _text_width(RE_ANSI.sub('', data))" }, { "identifier": "disp_trim", "path": "third-party/tqdm-4.66.1/tqdm/utils.py", "snippet": "def disp_trim(data, length):\n \"\"\"\n Trim a string which may contain ANSI control characters.\n \"\"\"\n if len(data) == disp_len(data):\n return data[:length]\n\n ansi_present = bool(RE_ANSI.search(data))\n while disp_len(data) > length: # carefully delete one char at a time\n data = data[:-1]\n if ansi_present and bool(RE_ANSI.search(data)):\n # assume ANSI reset is required\n return data if data.endswith(\"\\033[0m\") else data + \"\\033[0m\"\n return data" }, { "identifier": "envwrap", "path": "third-party/tqdm-4.66.1/tqdm/utils.py", "snippet": "def envwrap(prefix, types=None, is_method=False):\n \"\"\"\n Override parameter defaults via `os.environ[prefix + param_name]`.\n Maps UPPER_CASE env vars map to lower_case param names.\n camelCase isn't supported (because Windows ignores case).\n\n Precedence (highest first):\n - call (`foo(a=3)`)\n - environ (`FOO_A=2`)\n - signature (`def foo(a=1)`)\n\n Parameters\n ----------\n prefix : str\n Env var prefix, e.g. \"FOO_\"\n types : dict, optional\n Fallback mappings `{'param_name': type, ...}` if types cannot be\n inferred from function signature.\n Consider using `types=collections.defaultdict(lambda: ast.literal_eval)`.\n is_method : bool, optional\n Whether to use `functools.partialmethod`. 
If (default: False) use `functools.partial`.\n\n Examples\n --------\n ```\n $ cat foo.py\n from tqdm.utils import envwrap\n @envwrap(\"FOO_\")\n def test(a=1, b=2, c=3):\n print(f\"received: a={a}, b={b}, c={c}\")\n\n $ FOO_A=42 FOO_C=1337 python -c 'import foo; foo.test(c=99)'\n received: a=42, b=2, c=99\n ```\n \"\"\"\n if types is None:\n types = {}\n i = len(prefix)\n env_overrides = {k[i:].lower(): v for k, v in os.environ.items() if k.startswith(prefix)}\n part = partialmethod if is_method else partial\n\n def wrap(func):\n params = signature(func).parameters\n # ignore unknown env vars\n overrides = {k: v for k, v in env_overrides.items() if k in params}\n # infer overrides' `type`s\n for k in overrides:\n param = params[k]\n if param.annotation is not param.empty: # typehints\n for typ in getattr(param.annotation, '__args__', (param.annotation,)):\n try:\n overrides[k] = typ(overrides[k])\n except Exception:\n pass\n else:\n break\n elif param.default is not None: # type of default value\n overrides[k] = type(param.default)(overrides[k])\n else:\n try: # `types` fallback\n overrides[k] = types[k](overrides[k])\n except KeyError: # keep unconverted (`str`)\n pass\n return part(func, **overrides)\n return wrap" } ]
import sys from collections import OrderedDict, defaultdict from contextlib import contextmanager from datetime import datetime, timedelta from numbers import Number from time import time from warnings import warn from weakref import WeakSet from ._monitor import TMonitor from .utils import ( CallbackIOWrapper, Comparable, DisableOnWriteError, FormatReplace, SimpleTextIOWrapper, _is_ascii, _screen_shape_wrapper, _supports_unicode, _term_move_up, disp_len, disp_trim, envwrap) from threading import RLock from multiprocessing import RLock from warnings import catch_warnings, simplefilter from pandas.core.frame import DataFrame from pandas.core.series import Series from pandas import Panel from pandas.core.window.rolling import _Rolling_and_Expanding from pandas.core.window import _Rolling_and_Expanding from pandas.core.window.expanding import Expanding from pandas.core.window.rolling import Rolling from pandas.core.groupby.generic import SeriesGroupBy # , NDFrameGroupBy from pandas.core.groupby.generic import DataFrameGroupBy from pandas.core.groupby.groupby import DataFrameGroupBy, SeriesGroupBy from pandas.core.groupby import DataFrameGroupBy, SeriesGroupBy from pandas.core.groupby.groupby import GroupBy from pandas.core.groupby import GroupBy from pandas.core.groupby.groupby import PanelGroupBy from pandas.core.groupby import PanelGroupBy from pandas.core.common import is_builtin_func
5983
tqdm_kwargs = tqdm_kwargs.copy() deprecated_t = [tqdm_kwargs.pop('deprecated_t', None)] def inner_generator(df_function='apply'): def inner(df, func, *args, **kwargs): """ Parameters ---------- df : (DataFrame|Series)[GroupBy] Data (may be grouped). func : function To be applied on the (grouped) data. **kwargs : optional Transmitted to `df.apply()`. """ # Precompute total iterations total = tqdm_kwargs.pop("total", getattr(df, 'ngroups', None)) if total is None: # not grouped if df_function == 'applymap': total = df.size elif isinstance(df, Series): total = len(df) elif (_Rolling_and_Expanding is None or not isinstance(df, _Rolling_and_Expanding)): # DataFrame or Panel axis = kwargs.get('axis', 0) if axis == 'index': axis = 0 elif axis == 'columns': axis = 1 # when axis=0, total is shape[axis1] total = df.size // df.shape[axis] # Init bar if deprecated_t[0] is not None: t = deprecated_t[0] deprecated_t[0] = None else: t = cls(total=total, **tqdm_kwargs) if len(args) > 0: # *args intentionally not supported (see #244, #299) TqdmDeprecationWarning( "Except func, normal arguments are intentionally" + " not supported by" + " `(DataFrame|Series|GroupBy).progress_apply`." + " Use keyword arguments instead.", fp_write=getattr(t.fp, 'write', sys.stderr.write)) try: # pandas>=1.3.0 except ImportError: is_builtin_func = df._is_builtin_func try: func = is_builtin_func(func) except TypeError: pass # Define bar updating wrapper def wrapper(*args, **kwargs): # update tbar correctly # it seems `pandas apply` calls `func` twice # on the first column/row to decide whether it can # take a fast or slow code path; so stop when t.total==t.n t.update(n=1 if not t.total or t.n < t.total else 0) return func(*args, **kwargs) # Apply the provided function (in **kwargs) # on the df using our wrapper (which provides bar updating) try: return getattr(df, df_function)(wrapper, **kwargs) finally: t.close() return inner # Monkeypatch pandas to provide easy methods # Enable custom tqdm progress in pandas! 
Series.progress_apply = inner_generator() SeriesGroupBy.progress_apply = inner_generator() Series.progress_map = inner_generator('map') SeriesGroupBy.progress_map = inner_generator('map') DataFrame.progress_apply = inner_generator() DataFrameGroupBy.progress_apply = inner_generator() DataFrame.progress_applymap = inner_generator('applymap') if Panel is not None: Panel.progress_apply = inner_generator() if PanelGroupBy is not None: PanelGroupBy.progress_apply = inner_generator() GroupBy.progress_apply = inner_generator() GroupBy.progress_aggregate = inner_generator('aggregate') GroupBy.progress_transform = inner_generator('transform') if Rolling is not None and Expanding is not None: Rolling.progress_apply = inner_generator() Expanding.progress_apply = inner_generator() elif _Rolling_and_Expanding is not None: _Rolling_and_Expanding.progress_apply = inner_generator() # override defaults via env vars @envwrap("TQDM_", is_method=True, types={'total': float, 'ncols': int, 'miniters': float, 'position': int, 'nrows': int}) def __init__(self, iterable=None, desc=None, total=None, leave=True, file=None, ncols=None, mininterval=0.1, maxinterval=10.0, miniters=None, ascii=None, disable=False, unit='it', unit_scale=False, dynamic_ncols=False, smoothing=0.3, bar_format=None, initial=0, position=None, postfix=None, unit_divisor=1000, write_bytes=False, lock_args=None, nrows=None, colour=None, delay=0.0, gui=False, **kwargs): """see tqdm.tqdm for arguments""" if file is None: file = sys.stderr if write_bytes: # Despite coercing unicode into bytes, py2 sys.std* streams # should have bytes written to them.
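The cropped snippet above is the body of tqdm's pandas() classmethod, which monkeypatches progress_apply/progress_map (and progress_aggregate/progress_transform on GroupBy) onto pandas objects. A typical end-user call, assuming pandas and tqdm are installed, looks like this:

import pandas as pd
from tqdm import tqdm

tqdm.pandas(desc="rows")  # installs .progress_apply / .progress_map on pandas objects

df = pd.DataFrame({"x": range(1000)})
squares = df["x"].progress_apply(lambda v: v * v)                       # Series.apply with a progress bar
sums = df.groupby(df["x"] % 10).progress_apply(lambda g: g["x"].sum())  # works on GroupBy as well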
""" Customisable progressbar decorator for iterators. Includes a default `range` iterator printing to `stderr`. Usage: >>> from tqdm import trange, tqdm >>> for i in trange(10): ... ... """ __author__ = "https://github.com/tqdm/tqdm#contributions" __all__ = ['tqdm', 'trange', 'TqdmTypeError', 'TqdmKeyError', 'TqdmWarning', 'TqdmExperimentalWarning', 'TqdmDeprecationWarning', 'TqdmMonitorWarning'] class TqdmTypeError(TypeError): pass class TqdmKeyError(KeyError): pass class TqdmWarning(Warning): """base class for all tqdm warnings. Used for non-external-code-breaking errors, such as garbled printing. """ def __init__(self, msg, fp_write=None, *a, **k): if fp_write is not None: fp_write("\n" + self.__class__.__name__ + ": " + str(msg).rstrip() + '\n') else: super(TqdmWarning, self).__init__(msg, *a, **k) class TqdmExperimentalWarning(TqdmWarning, FutureWarning): """beta feature, unstable API and behaviour""" pass class TqdmDeprecationWarning(TqdmWarning, DeprecationWarning): # not suppressed if raised pass class TqdmMonitorWarning(TqdmWarning, RuntimeWarning): """tqdm monitor errors which do not affect external functionality""" pass def TRLock(*args, **kwargs): """threading RLock""" try: return RLock(*args, **kwargs) except (ImportError, OSError): # pragma: no cover pass class TqdmDefaultWriteLock(object): """ Provide a default write lock for thread and multiprocessing safety. Works only on platforms supporting `fork` (so Windows is excluded). You must initialise a `tqdm` or `TqdmDefaultWriteLock` instance before forking in order for the write lock to work. On Windows, you need to supply the lock from the parent to the children as an argument to joblib or the parallelism lib you use. """ # global thread lock so no setup required for multithreading. # NB: Do not create multiprocessing lock as it sets the multiprocessing # context, disallowing `spawn()`/`forkserver()` th_lock = TRLock() def __init__(self): # Create global parallelism locks to avoid racing issues with parallel # bars works only if fork available (Linux/MacOSX, but not Windows) cls = type(self) root_lock = cls.th_lock if root_lock is not None: root_lock.acquire() cls.create_mp_lock() self.locks = [lk for lk in [cls.mp_lock, cls.th_lock] if lk is not None] if root_lock is not None: root_lock.release() def acquire(self, *a, **k): for lock in self.locks: lock.acquire(*a, **k) def release(self): for lock in self.locks[::-1]: # Release in inverse order of acquisition lock.release() def __enter__(self): self.acquire() def __exit__(self, *exc): self.release() @classmethod def create_mp_lock(cls): if not hasattr(cls, 'mp_lock'): try: cls.mp_lock = RLock() except (ImportError, OSError): # pragma: no cover cls.mp_lock = None @classmethod def create_th_lock(cls): assert hasattr(cls, 'th_lock') warn("create_th_lock not needed anymore", TqdmDeprecationWarning, stacklevel=2) class Bar(object): """ `str.format`-able bar with format specifiers: `[width][type]` - `width` + unspecified (default): use `self.default_len` + `int >= 0`: overrides `self.default_len` + `int < 0`: subtract from `self.default_len` - `type` + `a`: ascii (`charset=self.ASCII` override) + `u`: unicode (`charset=self.UTF` override) + `b`: blank (`charset=" "` override) """ ASCII = " 123456789#" UTF = u" " + u''.join(map(chr, range(0x258F, 0x2587, -1))) BLANK = " " COLOUR_RESET = '\x1b[0m' COLOUR_RGB = '\x1b[38;2;%d;%d;%dm' COLOURS = {'BLACK': '\x1b[30m', 'RED': '\x1b[31m', 'GREEN': '\x1b[32m', 'YELLOW': '\x1b[33m', 'BLUE': '\x1b[34m', 'MAGENTA': '\x1b[35m', 'CYAN': 
'\x1b[36m', 'WHITE': '\x1b[37m'} def __init__(self, frac, default_len=10, charset=UTF, colour=None): if not 0 <= frac <= 1: warn("clamping frac to range [0, 1]", TqdmWarning, stacklevel=2) frac = max(0, min(1, frac)) assert default_len > 0 self.frac = frac self.default_len = default_len self.charset = charset self.colour = colour @property def colour(self): return self._colour @colour.setter def colour(self, value): if not value: self._colour = None return try: if value.upper() in self.COLOURS: self._colour = self.COLOURS[value.upper()] elif value[0] == '#' and len(value) == 7: self._colour = self.COLOUR_RGB % tuple( int(i, 16) for i in (value[1:3], value[3:5], value[5:7])) else: raise KeyError except (KeyError, AttributeError): warn("Unknown colour (%s); valid choices: [hex (#00ff00), %s]" % ( value, ", ".join(self.COLOURS)), TqdmWarning, stacklevel=2) self._colour = None def __format__(self, format_spec): if format_spec: _type = format_spec[-1].lower() try: charset = {'a': self.ASCII, 'u': self.UTF, 'b': self.BLANK}[_type] except KeyError: charset = self.charset else: format_spec = format_spec[:-1] if format_spec: N_BARS = int(format_spec) if N_BARS < 0: N_BARS += self.default_len else: N_BARS = self.default_len else: charset = self.charset N_BARS = self.default_len nsyms = len(charset) - 1 bar_length, frac_bar_length = divmod(int(self.frac * N_BARS * nsyms), nsyms) res = charset[-1] * bar_length if bar_length < N_BARS: # whitespace padding res = res + charset[frac_bar_length] + charset[0] * (N_BARS - bar_length - 1) return self.colour + res + self.COLOUR_RESET if self.colour else res class EMA(object): """ Exponential moving average: smoothing to give progressively lower weights to older values. Parameters ---------- smoothing : float, optional Smoothing factor in range [0, 1], [default: 0.3]. Increase to give more weight to recent values. Ranges from 0 (yields old value) to 1 (yields new value). """ def __init__(self, smoothing=0.3): self.alpha = smoothing self.last = 0 self.calls = 0 def __call__(self, x=None): """ Parameters ---------- x : float New value to include in EMA. """ beta = 1 - self.alpha if x is not None: self.last = self.alpha * x + beta * self.last self.calls += 1 return self.last / (1 - beta ** self.calls) if self.calls else self.last class tqdm(Comparable): """ Decorate an iterable object, returning an iterator which acts exactly like the original iterable, but prints a dynamically updating progressbar every time a value is requested. Parameters ---------- iterable : iterable, optional Iterable to decorate with a progressbar. Leave blank to manually manage the updates. desc : str, optional Prefix for the progressbar. total : int or float, optional The number of expected iterations. If unspecified, len(iterable) is used if possible. If float("inf") or as a last resort, only basic progress statistics are displayed (no ETA, no progressbar). If `gui` is True and this parameter needs subsequent updating, specify an initial arbitrary large positive number, e.g. 9e9. leave : bool, optional If [default: True], keeps all traces of the progressbar upon termination of iteration. If `None`, will leave only if `position` is `0`. file : `io.TextIOWrapper` or `io.StringIO`, optional Specifies where to output the progress messages (default: sys.stderr). Uses `file.write(str)` and `file.flush()` methods. For encoding, see `write_bytes`. ncols : int, optional The width of the entire output message. If specified, dynamically resizes the progressbar to stay within this bound. 
If unspecified, attempts to use environment width. The fallback is a meter width of 10 and no limit for the counter and statistics. If 0, will not print any meter (only stats). mininterval : float, optional Minimum progress display update interval [default: 0.1] seconds. maxinterval : float, optional Maximum progress display update interval [default: 10] seconds. Automatically adjusts `miniters` to correspond to `mininterval` after long display update lag. Only works if `dynamic_miniters` or monitor thread is enabled. miniters : int or float, optional Minimum progress display update interval, in iterations. If 0 and `dynamic_miniters`, will automatically adjust to equal `mininterval` (more CPU efficient, good for tight loops). If > 0, will skip display of specified number of iterations. Tweak this and `mininterval` to get very efficient loops. If your progress is erratic with both fast and slow iterations (network, skipping items, etc) you should set miniters=1. ascii : bool or str, optional If unspecified or False, use unicode (smooth blocks) to fill the meter. The fallback is to use ASCII characters " 123456789#". disable : bool, optional Whether to disable the entire progressbar wrapper [default: False]. If set to None, disable on non-TTY. unit : str, optional String that will be used to define the unit of each iteration [default: it]. unit_scale : bool or int or float, optional If 1 or True, the number of iterations will be reduced/scaled automatically and a metric prefix following the International System of Units standard will be added (kilo, mega, etc.) [default: False]. If any other non-zero number, will scale `total` and `n`. dynamic_ncols : bool, optional If set, constantly alters `ncols` and `nrows` to the environment (allowing for window resizes) [default: False]. smoothing : float, optional Exponential moving average smoothing factor for speed estimates (ignored in GUI mode). Ranges from 0 (average speed) to 1 (current/instantaneous speed) [default: 0.3]. bar_format : str, optional Specify a custom bar string formatting. May impact performance. [default: '{l_bar}{bar}{r_bar}'], where l_bar='{desc}: {percentage:3.0f}%|' and r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, ' '{rate_fmt}{postfix}]' Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt, percentage, elapsed, elapsed_s, ncols, nrows, desc, unit, rate, rate_fmt, rate_noinv, rate_noinv_fmt, rate_inv, rate_inv_fmt, postfix, unit_divisor, remaining, remaining_s, eta. Note that a trailing ": " is automatically removed after {desc} if the latter is empty. initial : int or float, optional The initial counter value. Useful when restarting a progress bar [default: 0]. If using float, consider specifying `{n:.3f}` or similar in `bar_format`, or specifying `unit_scale`. position : int, optional Specify the line offset to print this bar (starting from 0) Automatic if unspecified. Useful to manage multiple bars at once (eg, from threads). postfix : dict or *, optional Specify additional stats to display at the end of the bar. Calls `set_postfix(**postfix)` if possible (dict). unit_divisor : float, optional [default: 1000], ignored unless `unit_scale` is True. write_bytes : bool, optional Whether to write bytes. If (default: False) will write unicode. lock_args : tuple, optional Passed to `refresh` for intermediate output (initialisation, iterating, and updating). nrows : int, optional The screen height. If specified, hides nested bars outside this bound. If unspecified, attempts to use environment height. 
The fallback is 20. colour : str, optional Bar colour (e.g. 'green', '#00ff00'). delay : float, optional Don't display until [default: 0] seconds have elapsed. gui : bool, optional WARNING: internal parameter - do not use. Use tqdm.gui.tqdm(...) instead. If set, will attempt to use matplotlib animations for a graphical output [default: False]. Returns ------- out : decorated iterator. """ monitor_interval = 10 # set to 0 to disable the thread monitor = None _instances = WeakSet() @staticmethod def format_sizeof(num, suffix='', divisor=1000): """ Formats a number (greater than unity) with SI Order of Magnitude prefixes. Parameters ---------- num : float Number ( >= 1) to format. suffix : str, optional Post-postfix [default: '']. divisor : float, optional Divisor between prefixes [default: 1000]. Returns ------- out : str Number with Order of Magnitude SI unit postfix. """ for unit in ['', 'k', 'M', 'G', 'T', 'P', 'E', 'Z']: if abs(num) < 999.5: if abs(num) < 99.95: if abs(num) < 9.995: return '{0:1.2f}'.format(num) + unit + suffix return '{0:2.1f}'.format(num) + unit + suffix return '{0:3.0f}'.format(num) + unit + suffix num /= divisor return '{0:3.1f}Y'.format(num) + suffix @staticmethod def format_interval(t): """ Formats a number of seconds as a clock time, [H:]MM:SS Parameters ---------- t : int Number of seconds. Returns ------- out : str [H:]MM:SS """ mins, s = divmod(int(t), 60) h, m = divmod(mins, 60) if h: return '{0:d}:{1:02d}:{2:02d}'.format(h, m, s) else: return '{0:02d}:{1:02d}'.format(m, s) @staticmethod def format_num(n): """ Intelligent scientific notation (.3g). Parameters ---------- n : int or float or Numeric A Number. Returns ------- out : str Formatted number. """ f = '{0:.3g}'.format(n).replace('+0', '+').replace('-0', '-') n = str(n) return f if len(f) < len(n) else n @staticmethod def status_printer(file): """ Manage the printing and in-place updating of a line of characters. Note that if the string is longer than a line, then in-place updating may not work (it will print a new line at each refresh). """ fp = file fp_flush = getattr(fp, 'flush', lambda: None) # pragma: no cover if fp in (sys.stderr, sys.stdout): getattr(sys.stderr, 'flush', lambda: None)() getattr(sys.stdout, 'flush', lambda: None)() def fp_write(s): fp.write(str(s)) fp_flush() last_len = [0] def print_status(s): len_s = disp_len(s) fp_write('\r' + s + (' ' * max(last_len[0] - len_s, 0))) last_len[0] = len_s return print_status @staticmethod def format_meter(n, total, elapsed, ncols=None, prefix='', ascii=False, unit='it', unit_scale=False, rate=None, bar_format=None, postfix=None, unit_divisor=1000, initial=0, colour=None, **extra_kwargs): """ Return a string-based progress bar given some parameters Parameters ---------- n : int or float Number of finished iterations. total : int or float The expected total number of iterations. If meaningless (None), only basic progress statistics are displayed (no ETA). elapsed : float Number of seconds passed since start. ncols : int, optional The width of the entire output message. If specified, dynamically resizes `{bar}` to stay within this bound [default: None]. If `0`, will not print any bar (only stats). The fallback is `{bar:10}`. prefix : str, optional Prefix message (included in total width) [default: '']. Use as {desc} in bar_format string. ascii : bool, optional or str, optional If not set, use unicode (smooth blocks) to fill the meter [default: False]. The fallback is to use ASCII characters " 123456789#". 
unit : str, optional The iteration unit [default: 'it']. unit_scale : bool or int or float, optional If 1 or True, the number of iterations will be printed with an appropriate SI metric prefix (k = 10^3, M = 10^6, etc.) [default: False]. If any other non-zero number, will scale `total` and `n`. rate : float, optional Manual override for iteration rate. If [default: None], uses n/elapsed. bar_format : str, optional Specify a custom bar string formatting. May impact performance. [default: '{l_bar}{bar}{r_bar}'], where l_bar='{desc}: {percentage:3.0f}%|' and r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, ' '{rate_fmt}{postfix}]' Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt, percentage, elapsed, elapsed_s, ncols, nrows, desc, unit, rate, rate_fmt, rate_noinv, rate_noinv_fmt, rate_inv, rate_inv_fmt, postfix, unit_divisor, remaining, remaining_s, eta. Note that a trailing ": " is automatically removed after {desc} if the latter is empty. postfix : *, optional Similar to `prefix`, but placed at the end (e.g. for additional stats). Note: postfix is usually a string (not a dict) for this method, and will if possible be set to postfix = ', ' + postfix. However other types are supported (#382). unit_divisor : float, optional [default: 1000], ignored unless `unit_scale` is True. initial : int or float, optional The initial counter value [default: 0]. colour : str, optional Bar colour (e.g. 'green', '#00ff00'). Returns ------- out : Formatted meter and stats, ready to display. """ # sanity check: total if total and n >= (total + 0.5): # allow float imprecision (#849) total = None # apply custom scale if necessary if unit_scale and unit_scale not in (True, 1): if total: total *= unit_scale n *= unit_scale if rate: rate *= unit_scale # by default rate = self.avg_dn / self.avg_dt unit_scale = False elapsed_str = tqdm.format_interval(elapsed) # if unspecified, attempt to use rate = average speed # (we allow manual override since predicting time is an arcane art) if rate is None and elapsed: rate = (n - initial) / elapsed inv_rate = 1 / rate if rate else None format_sizeof = tqdm.format_sizeof rate_noinv_fmt = ((format_sizeof(rate) if unit_scale else '{0:5.2f}'.format(rate)) if rate else '?') + unit + '/s' rate_inv_fmt = ( (format_sizeof(inv_rate) if unit_scale else '{0:5.2f}'.format(inv_rate)) if inv_rate else '?') + 's/' + unit rate_fmt = rate_inv_fmt if inv_rate and inv_rate > 1 else rate_noinv_fmt if unit_scale: n_fmt = format_sizeof(n, divisor=unit_divisor) total_fmt = format_sizeof(total, divisor=unit_divisor) if total is not None else '?' else: n_fmt = str(n) total_fmt = str(total) if total is not None else '?' try: postfix = ', ' + postfix if postfix else '' except TypeError: pass remaining = (total - n) / rate if rate and total else 0 remaining_str = tqdm.format_interval(remaining) if rate else '?' 
try: eta_dt = (datetime.now() + timedelta(seconds=remaining) if rate and total else datetime.utcfromtimestamp(0)) except OverflowError: eta_dt = datetime.max # format the stats displayed to the left and right sides of the bar if prefix: # old prefix setup work around bool_prefix_colon_already = (prefix[-2:] == ": ") l_bar = prefix if bool_prefix_colon_already else prefix + ": " else: l_bar = '' r_bar = f'| {n_fmt}/{total_fmt} [{elapsed_str}<{remaining_str}, {rate_fmt}{postfix}]' # Custom bar formatting # Populate a dict with all available progress indicators format_dict = { # slight extension of self.format_dict 'n': n, 'n_fmt': n_fmt, 'total': total, 'total_fmt': total_fmt, 'elapsed': elapsed_str, 'elapsed_s': elapsed, 'ncols': ncols, 'desc': prefix or '', 'unit': unit, 'rate': inv_rate if inv_rate and inv_rate > 1 else rate, 'rate_fmt': rate_fmt, 'rate_noinv': rate, 'rate_noinv_fmt': rate_noinv_fmt, 'rate_inv': inv_rate, 'rate_inv_fmt': rate_inv_fmt, 'postfix': postfix, 'unit_divisor': unit_divisor, 'colour': colour, # plus more useful definitions 'remaining': remaining_str, 'remaining_s': remaining, 'l_bar': l_bar, 'r_bar': r_bar, 'eta': eta_dt, **extra_kwargs} # total is known: we can predict some stats if total: # fractional and percentage progress frac = n / total percentage = frac * 100 l_bar += '{0:3.0f}%|'.format(percentage) if ncols == 0: return l_bar[:-1] + r_bar[1:] format_dict.update(l_bar=l_bar) if bar_format: format_dict.update(percentage=percentage) # auto-remove colon for empty `{desc}` if not prefix: bar_format = bar_format.replace("{desc}: ", '') else: bar_format = "{l_bar}{bar}{r_bar}" full_bar = FormatReplace() nobar = bar_format.format(bar=full_bar, **format_dict) if not full_bar.format_called: return nobar # no `{bar}`; nothing else to do # Formatting progress bar space available for bar's display full_bar = Bar(frac, max(1, ncols - disp_len(nobar)) if ncols else 10, charset=Bar.ASCII if ascii is True else ascii or Bar.UTF, colour=colour) if not _is_ascii(full_bar.charset) and _is_ascii(bar_format): bar_format = str(bar_format) res = bar_format.format(bar=full_bar, **format_dict) return disp_trim(res, ncols) if ncols else res elif bar_format: # user-specified bar_format but no total l_bar += '|' format_dict.update(l_bar=l_bar, percentage=0) full_bar = FormatReplace() nobar = bar_format.format(bar=full_bar, **format_dict) if not full_bar.format_called: return nobar full_bar = Bar(0, max(1, ncols - disp_len(nobar)) if ncols else 10, charset=Bar.BLANK, colour=colour) res = bar_format.format(bar=full_bar, **format_dict) return disp_trim(res, ncols) if ncols else res else: # no total: no progressbar, ETA, just progress stats return (f'{(prefix + ": ") if prefix else ""}' f'{n_fmt}{unit} [{elapsed_str}, {rate_fmt}{postfix}]') def __new__(cls, *_, **__): instance = object.__new__(cls) with cls.get_lock(): # also constructs lock if non-existent cls._instances.add(instance) # create monitoring thread if cls.monitor_interval and (cls.monitor is None or not cls.monitor.report()): try: cls.monitor = TMonitor(cls, cls.monitor_interval) except Exception as e: # pragma: nocover warn("tqdm:disabling monitor support" " (monitor_interval = 0) due to:\n" + str(e), TqdmMonitorWarning, stacklevel=2) cls.monitor_interval = 0 return instance @classmethod def _get_free_pos(cls, instance=None): """Skips specified instance.""" positions = {abs(inst.pos) for inst in cls._instances if inst is not instance and hasattr(inst, "pos")} return min(set(range(len(positions) + 1)).difference(positions)) 
@classmethod def _decr_instances(cls, instance): """ Remove from list and reposition another unfixed bar to fill the new gap. This means that by default (where all nested bars are unfixed), order is not maintained but screen flicker/blank space is minimised. (tqdm<=4.44.1 moved ALL subsequent unfixed bars up.) """ with cls._lock: try: cls._instances.remove(instance) except KeyError: # if not instance.gui: # pragma: no cover # raise pass # py2: maybe magically removed already # else: if not instance.gui: last = (instance.nrows or 20) - 1 # find unfixed (`pos >= 0`) overflow (`pos >= nrows - 1`) instances = list(filter( lambda i: hasattr(i, "pos") and last <= i.pos, cls._instances)) # set first found to current `pos` if instances: inst = min(instances, key=lambda i: i.pos) inst.clear(nolock=True) inst.pos = abs(instance.pos) @classmethod def write(cls, s, file=None, end="\n", nolock=False): """Print a message via tqdm (without overlap with bars).""" fp = file if file is not None else sys.stdout with cls.external_write_mode(file=file, nolock=nolock): # Write the message fp.write(s) fp.write(end) @classmethod @contextmanager def external_write_mode(cls, file=None, nolock=False): """ Disable tqdm within context and refresh tqdm when exits. Useful when writing to standard output stream """ fp = file if file is not None else sys.stdout try: if not nolock: cls.get_lock().acquire() # Clear all bars inst_cleared = [] for inst in getattr(cls, '_instances', []): # Clear instance if in the target output file # or if write output + tqdm output are both either # sys.stdout or sys.stderr (because both are mixed in terminal) if hasattr(inst, "start_t") and (inst.fp == fp or all( f in (sys.stdout, sys.stderr) for f in (fp, inst.fp))): inst.clear(nolock=True) inst_cleared.append(inst) yield # Force refresh display of bars we cleared for inst in inst_cleared: inst.refresh(nolock=True) finally: if not nolock: cls._lock.release() @classmethod def set_lock(cls, lock): """Set the global lock.""" cls._lock = lock @classmethod def get_lock(cls): """Get the global lock. Construct it if it does not exist.""" if not hasattr(cls, '_lock'): cls._lock = TqdmDefaultWriteLock() return cls._lock @classmethod def pandas(cls, **tqdm_kwargs): """ Registers the current `tqdm` class with pandas.core. ( frame.DataFrame | series.Series | groupby.(generic.)DataFrameGroupBy | groupby.(generic.)SeriesGroupBy ).progress_apply A new instance will be created every time `progress_apply` is called, and each instance will automatically `close()` upon completion. 
Parameters ---------- tqdm_kwargs : arguments for the tqdm instance Examples -------- >>> import pandas as pd >>> import numpy as np >>> from tqdm import tqdm >>> from tqdm.gui import tqdm as tqdm_gui >>> >>> df = pd.DataFrame(np.random.randint(0, 100, (100000, 6))) >>> tqdm.pandas(ncols=50) # can use tqdm_gui, optional kwargs, etc >>> # Now you can use `progress_apply` instead of `apply` >>> df.groupby(0).progress_apply(lambda x: x**2) References ---------- <https://stackoverflow.com/questions/18603270/\ progress-indicator-during-pandas-operations-python> """ try: with catch_warnings(): simplefilter("ignore", category=FutureWarning) except ImportError: # pandas>=1.2.0 Panel = None Rolling, Expanding = None, None try: # pandas>=1.0.0 except ImportError: try: # pandas>=0.18.0 except ImportError: # pandas>=1.2.0 try: # pandas>=1.2.0 _Rolling_and_Expanding = Rolling, Expanding except ImportError: # pragma: no cover _Rolling_and_Expanding = None try: # pandas>=0.25.0 except ImportError: # pragma: no cover try: # pandas>=0.23.0 except ImportError: try: # pandas>=0.23.0 except ImportError: # pragma: no cover try: # pandas>=0.23.0 except ImportError: try: except ImportError: # pandas>=0.25.0 PanelGroupBy = None tqdm_kwargs = tqdm_kwargs.copy() deprecated_t = [tqdm_kwargs.pop('deprecated_t', None)] def inner_generator(df_function='apply'): def inner(df, func, *args, **kwargs): """ Parameters ---------- df : (DataFrame|Series)[GroupBy] Data (may be grouped). func : function To be applied on the (grouped) data. **kwargs : optional Transmitted to `df.apply()`. """ # Precompute total iterations total = tqdm_kwargs.pop("total", getattr(df, 'ngroups', None)) if total is None: # not grouped if df_function == 'applymap': total = df.size elif isinstance(df, Series): total = len(df) elif (_Rolling_and_Expanding is None or not isinstance(df, _Rolling_and_Expanding)): # DataFrame or Panel axis = kwargs.get('axis', 0) if axis == 'index': axis = 0 elif axis == 'columns': axis = 1 # when axis=0, total is shape[axis1] total = df.size // df.shape[axis] # Init bar if deprecated_t[0] is not None: t = deprecated_t[0] deprecated_t[0] = None else: t = cls(total=total, **tqdm_kwargs) if len(args) > 0: # *args intentionally not supported (see #244, #299) TqdmDeprecationWarning( "Except func, normal arguments are intentionally" + " not supported by" + " `(DataFrame|Series|GroupBy).progress_apply`." + " Use keyword arguments instead.", fp_write=getattr(t.fp, 'write', sys.stderr.write)) try: # pandas>=1.3.0 except ImportError: is_builtin_func = df._is_builtin_func try: func = is_builtin_func(func) except TypeError: pass # Define bar updating wrapper def wrapper(*args, **kwargs): # update tbar correctly # it seems `pandas apply` calls `func` twice # on the first column/row to decide whether it can # take a fast or slow code path; so stop when t.total==t.n t.update(n=1 if not t.total or t.n < t.total else 0) return func(*args, **kwargs) # Apply the provided function (in **kwargs) # on the df using our wrapper (which provides bar updating) try: return getattr(df, df_function)(wrapper, **kwargs) finally: t.close() return inner # Monkeypatch pandas to provide easy methods # Enable custom tqdm progress in pandas! 
Series.progress_apply = inner_generator() SeriesGroupBy.progress_apply = inner_generator() Series.progress_map = inner_generator('map') SeriesGroupBy.progress_map = inner_generator('map') DataFrame.progress_apply = inner_generator() DataFrameGroupBy.progress_apply = inner_generator() DataFrame.progress_applymap = inner_generator('applymap') if Panel is not None: Panel.progress_apply = inner_generator() if PanelGroupBy is not None: PanelGroupBy.progress_apply = inner_generator() GroupBy.progress_apply = inner_generator() GroupBy.progress_aggregate = inner_generator('aggregate') GroupBy.progress_transform = inner_generator('transform') if Rolling is not None and Expanding is not None: Rolling.progress_apply = inner_generator() Expanding.progress_apply = inner_generator() elif _Rolling_and_Expanding is not None: _Rolling_and_Expanding.progress_apply = inner_generator() # override defaults via env vars @envwrap("TQDM_", is_method=True, types={'total': float, 'ncols': int, 'miniters': float, 'position': int, 'nrows': int}) def __init__(self, iterable=None, desc=None, total=None, leave=True, file=None, ncols=None, mininterval=0.1, maxinterval=10.0, miniters=None, ascii=None, disable=False, unit='it', unit_scale=False, dynamic_ncols=False, smoothing=0.3, bar_format=None, initial=0, position=None, postfix=None, unit_divisor=1000, write_bytes=False, lock_args=None, nrows=None, colour=None, delay=0.0, gui=False, **kwargs): """see tqdm.tqdm for arguments""" if file is None: file = sys.stderr if write_bytes: # Despite coercing unicode into bytes, py2 sys.std* streams # should have bytes written to them.
file = SimpleTextIOWrapper(
5
2023-12-14 07:43:03+00:00
8k
LkPrtctrd/BSL-V53
Heart/Record/ByteStream.py
[ { "identifier": "ByteStreamHelper", "path": "Heart/Record/ByteStreamHelper.py", "snippet": "class ByteStreamHelper:\n def readDataReference(self):\n result = []\n result.append(self.readVInt())\n if not result[0]:\n return None\n result.append(self.readVInt())\n return result\n\n def writeDataReference(self, high=0, low=-1):\n self.writeVInt(high)\n if high != 0:\n self.writeVInt(low)\n\n def compress(self, data):\n compressedText = zlib.compress(data)\n self.writeInt(len(compressedText) + 4)\n self.writeIntLittleEndian(len(data))\n self.buffer += compressedText\n\n def decompress(self):\n data_length = self.readInt()\n self.readIntLittleEndian()\n return zlib.decompress(self.readBytes(data_length - 4))\n\n def decodeIntList(self):\n length = self.readVInt()\n intList = []\n for i in range(length):\n intList.append(self.readVInt())\n return intList\n\n def decodeLogicLong(self, logicLong=None):\n if logicLong is None:\n logicLong = LogicLong(0, 0)\n high = self.readVInt()\n logicLong.high = high\n low = self.readVInt()\n logicLong.low = low\n\n def decodeLogicLongList(self):\n length = self.readVInt()\n logicLongList = []\n for i in range(length):\n logicLongList.append(LogicLong(self.readVInt(), self.readVInt()))\n return logicLongList\n\n def encodeIntList(self, intList):\n length = len(intList)\n self.writeVInt(length)\n for i in intList:\n self.writeVInt(i)\n\n def encodeLogicLong(self, logicLong):\n if logicLong is None:\n logicLong = LogicLong(0, 0)\n self.writeVInt(logicLong.getHigherInt(self))\n self.writeVInt(logicLong.getLowerInt(self))\n\n def encodeLogicLongList(self, logicLongList):\n length = len(logicLongList)\n self.writeVInt(self, length)\n for logicLong in logicLongList:\n self.writeVInt(logicLong.getHigherInt(self))\n self.writeVInt(logicLong.getLowerInt(self))" }, { "identifier": "ChecksumEncoder", "path": "Heart/Record/ChecksumEncoder.py", "snippet": "class ChecksumEncoder:\n def __init__(self):\n self.checksum = 0\n self.checksum2 = 0\n self.checksumEnabled = True\n\n def destruct(self):\n self.checksum = 0\n self.checksum2 = 0\n self.checksumEnabled = True\n\n def enableCheckSum(self, state):\n if not self.checksumEnabled or state:\n if not self.checksumEnabled and state:\n self.checksum = self.checksum2\n self.checksumEnabled = state\n else:\n self.checksum2 = self.checksum\n self.checksumEnabled = False\n\n def equals(self, checksum_instance):\n if not checksum_instance:\n return False\n\n if not checksum_instance.checksumEnabled:\n checksum = checksum_instance.checksum\n else:\n checksum2 = checksum_instance.checksum2\n\n if not self.checksumEnabled:\n checksum = self.checksum\n else:\n checksum2 = self.checksum2\n return checksum == checksum2\n\n def getCheckSum(self):\n if not self.checksumEnabled:\n checksum = self.checksum2\n else:\n checksum = self.checksum\n return checksum\n\n @staticmethod\n def hashCode():\n Debugger.error(\"ChecksumEncoder hashCode not designed\")\n return 42\n\n @staticmethod\n def isByteStream():\n return False\n\n def isCheckSumEnabled(self):\n return self.checksumEnabled\n\n @staticmethod\n def isCheckSumOnlyMode():\n return True\n\n def resetCheckSum(self):\n self.checksum = 0\n\n def writeBoolean(self, value):\n if value: integer = 13\n else: integer = 7\n self.checksum = integer + CPPDefs.__ROR4__(self.checksum, 31)\n\n def writeByte(self, value):\n self.checksum = CPPDefs.__ROR4__(self.checksum, 31) + value + 11\n\n def writeBytes(self, value, length):\n if value: integer = length + 38\n else: integer = 37\n self.checksum = 
CPPDefs.__ROR4__(self.checksum, 31)\n\n def writeInt8(self, value):\n if value + 0x80 >= 0x100:\n Debugger.error(\"\")\n self.checksum = CPPDefs.__ROR4__(self.checksum, 31) + value + 11\n\n def writeInt16(self, value):\n if value + 0x8000 >= 0x10000:\n Debugger.error(\"\")\n self.checksum = CPPDefs.__ROR4__(self.checksum, 31) + value + 19\n\n def writeInt24(self, value):\n if value + 0x800000 >= 0x1000000:\n Debugger.error(\"\")\n self.checksum = (value & 0xFFFFFF) + CPPDefs.__ROR4__(self.checksum, 31) + value + 21\n\n def writeInt(self, value):\n self.checksum = CPPDefs.__ROR4__(self.checksum, 31) + value + 9\n\n @staticmethod\n def writeLong(bytestream, logicLong):\n logicLong.encode(bytestream)\n\n def writeLongLong(self, logicLong):\n self.checksum = logicLong.getLowerInt() + CPPDefs.__ROR4__(logicLong.getHigherInt() + CPPDefs.__ROR4__(self.checksum, 31) + 67, 31) + 91\n\n def writeShort(self, value):\n self.checksum = CPPDefs.__ROR4__(self.checksum, 31) + value + 19\n\n def writeString(self, value):\n checksum = CPPDefs.__ROR4__(self.checksum, 31)\n if value:\n self.checksum = checksum + len(value) + 28\n else:\n self.checksum = checksum + 27\n\n def writeStringReference(self, value):\n self.checksum = len(value) + CPPDefs.__ROR4__(self.checksum, 31) + 38\n\n def writeVInt(self, value):\n self.checksum = value + CPPDefs.__ROR4__(self.checksum, 31) + 33\n\n def writeVLong(self, high, low):\n self.checksum = low + CPPDefs.__ROR4__(high + CPPDefs.__ROR4__(self.checksum, 31) + 65, 31) + 88" }, { "identifier": "LogicStringUtil", "path": "Heart/Logic/LogicStringUtil.py", "snippet": "class LogicStringUtil:\n @staticmethod\n def getBytes(string):\n return string.encode()\n\n @staticmethod\n def getByteLength(string):\n return len(string)" }, { "identifier": "Debugger", "path": "Heart/Record/Debugger.py", "snippet": "class Debugger:\n @staticmethod\n def error(message):\n print(\"[ERROR]\", message)\n\n @staticmethod\n def warning(message):\n print(\"[WARNING]\", message)" }, { "identifier": "LogicLong", "path": "Heart/Logic/LogicLong.py", "snippet": "class LogicLong:\n def __init__(self):\n self.high = 0\n self.low = 0\n\n def __init__(self, high, low):\n self.high = high\n self.low = low\n\n @staticmethod\n def clone(logicLong):\n return LogicLong(logicLong.high, logicLong.low)\n\n def decode(self, bytestream):\n self.high = bytestream.readInt()\n self.low = bytestream.readInt()\n\n def encode(self, bytestream):\n bytestream.writeInt(self.high)\n bytestream.writeInt(self.low)\n\n def equals(self, logicLong):\n if logicLong:\n if self.low == logicLong.low:\n return self.high == logicLong.high\n return False\n\n @staticmethod\n def getHigherInt(longlong):\n return longlong >> 32\n\n @overload\n def getHigherInt(self):\n return self.high\n\n @staticmethod\n def getLowerInt(longlong):\n result = longlong & 0x7FFFFFFF\n if longlong < 0:\n return longlong | 0x80000000\n return result\n\n @overload\n def getLowerInt(self):\n return self.low\n\n def getLong(self):\n result = self.low\n if result >> 31 == -1:\n return result | 0x80000000\n return result\n\n def greaterThan(self, logicLong):\n result = False\n if logicLong:\n result = True\n if self.high <= logicLong.high:\n result = False\n if self.high == logicLong.high:\n return self.low > logicLong.low\n return result\n\n def hashCode(self):\n return 31 * self.high + self.low\n\n def isZero(self):\n if not self.low:\n return self.high == 0\n else:\n return False\n\n def set(self, low, high):\n lowerInt = low & 0x7FFFFFFF\n if low < 0:\n lowerInt = 
low | 0x80000000\n self.high = high >> 32\n self.low = lowerInt\n\n def toLong(high, low):\n lowerInt = low & 0x7FFFFFFF\n if low < 0:\n lowerInt = low | 0x80000000\n return lowerInt | high << 32\n\n def toString(text, logiclong):\n print(text, f\"LogicLong({logiclong.high},{logiclong.low})\")" } ]
import zlib from Heart.Record.ByteStreamHelper import ByteStreamHelper from Heart.Record.ChecksumEncoder import ChecksumEncoder from Heart.Logic.LogicStringUtil import LogicStringUtil from Heart.Record.Debugger import Debugger from Heart.Logic.LogicLong import LogicLong
3,986
return 5 elif value > -2199023255552: return 6 elif value > -281474976710656: return 7 elif value > -36028797018963968: return 8 elif value > -4611686018427387903: return 9 else: return 10 else: if value < 64: return 1 elif value < 8192: return 2 elif value < 1048576: return 3 elif value < 134217727: return 4 elif value < 17179869184: return 5 elif value < 2199023255552: return 6 elif value < 281474976710656: return 7 elif value < 36028797018963968: return 8 elif value < 4611686018427387903: return 9 else: return 10 def isAtEnd(self): return len(self.messagePayload) <= self.offset @staticmethod def isByteStream(): return True @staticmethod def isCheckSumOnlyMode(): return False def readBoolean(self): bitoffset = self.bitoffset offset = self.offset + (8 - bitoffset >> 3) self.offset = offset self.bitoffset = bitoffset + 1 & 7 return (1 << (bitoffset & 31) & self.messagePayload[offset - 1]) != 0 def readByte(self): self.bitoffset = 0 result = self.messagePayload[self.offset] self.offset += 1 return result def readBytes(self, length, max=1000): self.bitoffset = 0 if (length & 0x80000000) != 0: if length != -1: Debugger.warning("Negative readBytes length encountered.") elif length <= max: result = self.messagePayload[self.offset:self.offset + length] self.offset += length return bytes(result) else: Debugger.warning("readBytes too long array, max", max) return b'' def readBytesLength(self): self.bitoffset = 0 result = (self.messagePayload[self.offset] << 24) result += (self.messagePayload[self.offset + 1] << 16) result += (self.messagePayload[self.offset + 2] << 8) result += (self.messagePayload[self.offset + 3]) self.offset += 4 return result def readInt8(self): self.bitoffset = 0 result = (self.messagePayload[self.offset]) self.offset += 1 return result def readInt16(self): self.bitoffset = 0 result = (self.messagePayload[self.offset] << 8) result += (self.messagePayload[self.offset + 1]) self.offset += 2 return result def readInt24(self): self.bitoffset = 0 result = (self.messagePayload[self.offset] << 16) result += (self.messagePayload[self.offset + 1] << 8) result += (self.messagePayload[self.offset + 2]) self.offset += 3 return result def readInt(self): self.bitoffset = 0 result = (self.messagePayload[self.offset] << 24) result += (self.messagePayload[self.offset + 1] << 16) result += (self.messagePayload[self.offset + 2] << 8) result += (self.messagePayload[self.offset + 3]) self.offset += 4 return result def readIntLittleEndian(self): self.bitoffset = 0 result = (self.messagePayload[self.offset]) result += (self.messagePayload[self.offset + 1] << 8) result += (self.messagePayload[self.offset + 2] << 16) result += (self.messagePayload[self.offset + 3] << 24) self.offset += 4 return result def readLong(self, logicLong=None): if not logicLong:
class ByteStream(ChecksumEncoder): def __init__(self, messageBuffer, unknown=0): super().__init__() self.messagePayload = messageBuffer self.bitoffset = 0 self.offset = 0 self.length = len(self.messagePayload) def clear(self, length): if self.messagePayload: self.messagePayload = b'' self.bitoffset = 0 self.offset = 0 def destroy(self): self.messagePayload = None self.bitoffset = 0 self.offset = 0 self.length = 0 def ensureCapacity(self, length): offset = self.offset if len(self.messagePayload) < offset + length: buffer_copy = self.messagePayload buf_len = length self.length = buf_len self.messagePayload += bytes([0] * buf_len) def writeHexa(self, data, length): self.bitoffset = 0 if data: if data.startswith('0x'): data = data[2:] self.messagePayload += bytes.fromhex(''.join(data.split()).replace('-', '')) self.offset += length def getBitOffset(self): return self.bitoffset def getByteArray(self): return self.messagePayload def getCapacityIncrement(self): return 100 def getDataPointer(self): return self.messagePayload[self.offset] def getLength(self): length = self.length if self.length <= self.offset: length = self.offset return length def getOffset(self): return self.offset @staticmethod def getVIntSizeInBytes(value): if value < 0: if value > -64: return 1 elif value > -8192: return 2 elif value > -1048576: return 3 elif value > -134217727: return 4 else: return 5 else: if value < 64: return 1 elif value < 8192: return 2 elif value < 1048576: return 3 elif value < 134217727: return 4 else: return 5 @staticmethod def getVLongSizeInBytes(value): if value < 0: if value > -64: return 1 elif value > -8192: return 2 elif value > -1048576: return 3 elif value > -134217727: return 4 elif value > -17179869184: return 5 elif value > -2199023255552: return 6 elif value > -281474976710656: return 7 elif value > -36028797018963968: return 8 elif value > -4611686018427387903: return 9 else: return 10 else: if value < 64: return 1 elif value < 8192: return 2 elif value < 1048576: return 3 elif value < 134217727: return 4 elif value < 17179869184: return 5 elif value < 2199023255552: return 6 elif value < 281474976710656: return 7 elif value < 36028797018963968: return 8 elif value < 4611686018427387903: return 9 else: return 10 def isAtEnd(self): return len(self.messagePayload) <= self.offset @staticmethod def isByteStream(): return True @staticmethod def isCheckSumOnlyMode(): return False def readBoolean(self): bitoffset = self.bitoffset offset = self.offset + (8 - bitoffset >> 3) self.offset = offset self.bitoffset = bitoffset + 1 & 7 return (1 << (bitoffset & 31) & self.messagePayload[offset - 1]) != 0 def readByte(self): self.bitoffset = 0 result = self.messagePayload[self.offset] self.offset += 1 return result def readBytes(self, length, max=1000): self.bitoffset = 0 if (length & 0x80000000) != 0: if length != -1: Debugger.warning("Negative readBytes length encountered.") elif length <= max: result = self.messagePayload[self.offset:self.offset + length] self.offset += length return bytes(result) else: Debugger.warning("readBytes too long array, max", max) return b'' def readBytesLength(self): self.bitoffset = 0 result = (self.messagePayload[self.offset] << 24) result += (self.messagePayload[self.offset + 1] << 16) result += (self.messagePayload[self.offset + 2] << 8) result += (self.messagePayload[self.offset + 3]) self.offset += 4 return result def readInt8(self): self.bitoffset = 0 result = (self.messagePayload[self.offset]) self.offset += 1 return result def readInt16(self): self.bitoffset = 0 
result = (self.messagePayload[self.offset] << 8) result += (self.messagePayload[self.offset + 1]) self.offset += 2 return result def readInt24(self): self.bitoffset = 0 result = (self.messagePayload[self.offset] << 16) result += (self.messagePayload[self.offset + 1] << 8) result += (self.messagePayload[self.offset + 2]) self.offset += 3 return result def readInt(self): self.bitoffset = 0 result = (self.messagePayload[self.offset] << 24) result += (self.messagePayload[self.offset + 1] << 16) result += (self.messagePayload[self.offset + 2] << 8) result += (self.messagePayload[self.offset + 3]) self.offset += 4 return result def readIntLittleEndian(self): self.bitoffset = 0 result = (self.messagePayload[self.offset]) result += (self.messagePayload[self.offset + 1] << 8) result += (self.messagePayload[self.offset + 2] << 16) result += (self.messagePayload[self.offset + 3] << 24) self.offset += 4 return result def readLong(self, logicLong=None): if not logicLong:
logicLong = LogicLong(0, 0)
4
2023-12-14 18:57:56+00:00
8k
sockheadrps/AIODesa
tests/test_Database.py
[ { "identifier": "Db", "path": "aiodesa/database.py", "snippet": "class Db:\n \"\"\"\n Represents a simple SQLite database interface.\n\n Args:\n db_path : str\n The path to the SQLite database file.\n\n\n Example:\n\n .. code-block:: python\n\n class Users:\n username: str\n id: str | None = None\n table_name: str = \"users\"\n\n async with Db(\"database.sqlite3\") as db:\n await db.read_table_schemas(Users)\n ...\n\n \"\"\"\n\n _tables: dict\n db_path: Path\n _conn: Any\n\n def __init__(self, db_path: str) -> None:\n self.db_path = Path(db_path)\n self._conn = None\n self._create_db()\n self._tables = {}\n\n def _create_db(self) -> None:\n \"\"\"\n Internal method to create the database file if it does not exist.\n\n Notes:\n - This method is automatically called during the initialization of the\n Db class.\n - It ensures that the SQLite database file is created at the specified\n path if\n it does not exist.\n \"\"\"\n if not self.db_path.exists():\n self.db_path.parent.mkdir(parents=True, exist_ok=True)\n self.db_path.touch()\n\n async def _process_single_data_class(self, schema: Any) -> None:\n \"\"\"\n Process a single data class schema.\n\n Args:\n schema: The data class schema representing a table.\n\n Returns:\n This method does not return any value.\n \"\"\"\n if not is_dataclass(schema):\n raise ValueError(\"Provided schema is not a data class\")\n\n self._tables[schema.table_name] = schema\n class_fields = fields(schema)\n for field in class_fields:\n if field.name == \"table_name\":\n schema_ = make_schema(str(field.default), schema)\n await self._create_table(schema_, field.name)\n\n async def read_table_schemas(self, class_obj: Any | Tuple[Any, ...]) -> None:\n \"\"\"Read table schemas and create tables in the database.\n\n Args:\n schema:\n The schema or tuple of schemas to be processed. Each schema\n should be a data class representing a table.\n\n Returns:\n This method does not return any value.\n\n Example:\n\n .. code-block:: python\n\n class Users:\n username: str\n id: str | None = None\n table_name: str = \"users\"\n\n async with Db(\"database.sqlite3\") as db:\n await db.read_table_schemas(Users)\n ...\n\n Note:\n Provide any additional notes or considerations about the method.\n \"\"\"\n # single dataclass\n if is_dataclass(class_obj):\n await self._process_single_data_class(class_obj)\n return\n\n # tuple of dataclasses\n if isinstance(class_obj, tuple):\n for _obj in class_obj:\n await self._process_single_data_class(_obj)\n return\n\n async def _table_exists(self, table_name: str) -> bool | None:\n \"\"\"\n Create a table in the database based on the provided\n TableSchema instance.\n\n Args:\n table_name: The name of the table.\n\n Returns:\n None\n\n This method creates a table in the database with the specified name\n and schema.\n\n \"\"\"\n if self._conn is not None:\n query = \"SELECT name FROM sqlite_master \\\n WHERE type='table' AND name=?;\"\n cursor = await self._conn.execute(query, (table_name,))\n return await cursor.fetchone() is not None\n return None\n\n async def _create_table(self, named_data: TableSchema, name: str) -> None:\n \"\"\"\n Internal method to create a table in the database based on the provided\n TableSchema instance.\n\n Args:\n named_data: The TableSchema instance containing the table_name\n and SQL data definition.\n name: The name of the table.\n\n Returns:\n None\n\n Example:\n\n .. 
code-block:: python\n\n if is_dataclass(schema):\n class_fields = fields(schema)\n for field in class_fields:\n if field.name == \"table_name\":\n schema_ = make_schema(str(field.default), schema)\n await self._create_table(schema_, field.name)\n return\n\n\n This method creates a table in the database with the specified name\n and schema.\n\n Note:\n The `named_data` parameter should include the `table_name` property\n for the name of the table and the `sql_definition` property for the\n SQL data definition of the table.\n \"\"\"\n if self._conn is not None:\n if not await self._table_exists(name):\n async with self._conn.executescript(named_data.data) as cursor:\n await cursor.fetchall()\n await self._conn.commit()\n\n def insert(self, data_class: Any) -> Callable[..., Coroutine[Any, Any, None]]:\n \"\"\"\n Create a record and insert it into the specified table.\n\n Args:\n data_class: The data class representing the table structure.\n\n Returns:\n A function to be called with the record data.\n\n Example:\n\n .. code-block:: python\n\n class Users:\n username: str\n id: str | None = None\n table_name: str = \"users\"\n\n async with Db(\"database.sqlite3\") as db:\n await db.read_table_schemas(Users)\n ...\n insert = db.update(UserEcon)\n await insert(\"john_doe\")\n\n \"\"\"\n\n async def _record(*args: Any, **kwargs: Any) -> None:\n data_cls = self._tables[data_class.table_name](*args, **kwargs)\n field_vals = {}\n for field in fields(data_cls):\n value = getattr(data_cls, field.name)\n if value is not None and value != data_cls.table_name:\n field_vals[field.name] = value\n\n insertion_vals = tuple(field_vals.values())\n\n columns_str = \", \".join(field_vals.keys())\n placeholders = \", \".join(\"?\" for _ in insertion_vals)\n sql = f\"INSERT INTO {data_class.table_name} \\\n ({columns_str}) VALUES ({placeholders});\"\n await self._conn.execute(sql, insertion_vals)\n await self._conn.commit()\n return None\n\n return _record\n\n def update(\n self, data_class: Any, column_identifier: None | str = None\n ) -> Callable[..., Coroutine[Any, Any, None]]:\n \"\"\"\n Create a record update operation for the specified table.\n\n Args:\n data_class: The data class representing the table structure.\n column_identifier: The column to use for identifying records.\n\n Returns:\n A function to be called with the record data for updating.\n\n Example:\n\n .. 
code-block:: python\n\n class Users:\n username: str\n id: str | None = None\n table_name: str = \"users\"\n\n async with Db(\"database.sqlite3\") as db:\n await db.read_table_schemas(Users)\n ...\n update = db.update(UserEcon)\n await update(\"john_doe\")\n\n Note:\n If the `column_identifier` is not provided, the primary key of the\n data class will be used as the identifier.\n \"\"\"\n\n async def _record(*args, **kwargs) -> None:\n data_cls = self._tables[data_class.table_name](*args, **kwargs)\n values = []\n set_clauses_placeholders = []\n for column, value in kwargs.items():\n values.append(value)\n set_clause = f\"{column} = ?\"\n set_clauses_placeholders.append(set_clause)\n set_clause_string = \", \".join(set_clauses_placeholders)\n values.extend(args)\n identifier = (\n column_identifier\n if column_identifier is not None\n else data_cls.primary_key\n )\n sql = f\"UPDATE {data_class.table_name} SET \\\n {set_clause_string} WHERE {identifier} = ?\"\n\n await self._conn.execute(sql, tuple(values))\n await self._conn.commit()\n\n return _record\n\n def find(\n self, data_class: Any, column_identifier: None | str = None\n ) -> Callable[..., Coroutine[Any, Any, None]]:\n \"\"\"\n Create a record retrieval operation for the specified table.\n\n Args:\n data_class: The data class representing the table structure.\n column_identifier: The column to use for identifying records.\n Defaults to the primary key of the data class if not specified.\n\n Returns:\n A function to be called with the identifier for record retrieval.\n\n Example:\n\n .. code-block:: python\n\n class MyBestFriends:\n username: str\n id: str | None = None\n table_name: str = \"users\"\n\n async with Db(\"database.sqlite3\") as db:\n await db.read_table_schemas(MyBestFriends)\n\n ...\n\n find_jimmy = db.find(MyBestFriends)\n jimmy = await find_jimmy(\"jimmy\")\n\n \"\"\"\n\n async def _record(*args, **kwargs) -> None:\n data_cls = self._tables[data_class.table_name](*args, **kwargs)\n identifier = (\n column_identifier\n if column_identifier is not None\n else data_cls.primary_key\n )\n results = []\n sql = f\"SELECT * FROM {data_cls.table_name} WHERE {identifier} = ?\"\n sql_args = (args[0],)\n async with self._conn.execute(sql, sql_args) as cursor:\n results = await cursor.fetchall()\n if len(results) > 0:\n rows_fetched = results[0]\n data_cls = data_class(*rows_fetched, *results[1:])\n return data_cls\n return None\n\n return _record\n\n def delete(\n self, data_class: Any, column_identifier: None | str = None\n ) -> Callable[..., Coroutine[Any, Any, None]]:\n \"\"\"\n Create a record deletion operation for the specified table.\n This defaults to the primary key ifthe column_identifier is\n not provided.\n\n Args:\n data_class: The data class representing the table structure.\n column_identifier: The column to use for identifying records.\n\n Returns:\n A function to be called with the identifier for record deletion.\n\n Example:\n\n .. 
code-block:: python\n\n class Users:\n username: str\n id: str | None = None\n table_name: str = \"users\"\n\n async with Db(\"database.sqlite3\") as db:\n await db.read_table_schemas(Users)\n\n ...\n\n delete = db.delete(UserEcon)\n await delete(\"john_doe\")\n\n \"\"\"\n\n async def _record(*args, **kwargs) -> None:\n data_cls = self._tables[data_class.table_name](*args, **kwargs)\n identifier = (\n column_identifier\n if column_identifier is not None\n else data_cls.primary_key\n )\n\n sql = f\"DELETE FROM {data_cls.table_name} WHERE {identifier} = ?\"\n sql_args = (args[0],)\n\n async with self._conn.execute(sql, sql_args) as cursor:\n await cursor.fetchall()\n await self._conn.commit()\n\n return _record\n\n async def _connect(self) -> None:\n \"\"\"\n Establish a connection to the SQLite database.\n\n Returns:\n None\n\n Example:\n\n .. code-block:: python\n\n connection = YourDatabaseConnection()\n await connection.connect()\n # The database connection is now established.\n\n Note:\n This method initializes the connection to the SQLite database\n using the provided `db_path`.\n \"\"\"\n self._conn = await aiosqlite.connect(self.db_path)\n\n async def _close(self) -> None:\n \"\"\"\n Close the connection to the SQLite database.\n\n Returns:\n None\n\n Example:\n\n .. code-block:: python\n\n connection = YourDatabaseConnection()\n await connection.connect()\n\n # Your database operations here\n\n await connection.close()\n # The database connection is now closed.\n\n Note:\n This method closes the connection to the SQLite database if it is open.\n \"\"\"\n if self._conn is not None:\n await self._conn.close()\n self._conn = None\n\n async def __aenter__(self) -> \"Db\":\n \"\"\"\n Asynchronous context manager entry point.\n\n Automatically connects to the database upon entering the context.\n\n Returns:\n Db:\n The Db instance with an active database connection.\n\n Example:\n .. code-block:: python\n\n async with YourDatabaseConnection() as connection:\n # Your asynchronous code here\n\n # Upon entering the context, the database connection is automatically\n established.\n\n Note:\n This method is intended for use with the `async with` statement in an\n asynchronous context manager. The returned `Db` instance represents\n the connection to the database.\n \"\"\"\n await self._connect()\n # await self._conn.execute(\"BEGIN\")\n return self\n\n async def __aexit__(self, exc_type, exc_value, traceback):\n \"\"\"\n Asynchronous context manager exit point.\n\n Automatically closes the database connection upon exiting the context.\n\n Args:\n exc_type (Type): The type of the exception raised, if any.\n exc_value (Exception): The exception object, if an exception\n occurred. Otherwise, None.\n traceback (TracebackType): The traceback information related to\n the exception, if any.\n\n Returns:\n None\n\n Example:\n .. 
code-block:: python\n\n async with YourDatabaseConnection() as connection:\n # Your asynchronous code here\n\n # Upon exiting the context, the database connection is\n automatically closed.\n\n Note:\n This method is intended for use with the `async with` statement in an\n asynchronous context manager.\n \"\"\"\n await self._close()" }, { "identifier": "make_schema", "path": "aiodesa/utils/table.py", "snippet": "def make_schema(name: str, data_cls: Any) -> TableSchema:\n \"\"\"\n Generate a TableSchema based on the provided data class.\n\n Args:\n name: The name of the table.\n data_cls: A data class defining the schema for the table.\n\n Returns:\n TableSchema: An instance of TableSchema containing the table_name and\n SQL data definition.\n\n Example:\n\n .. code-block:: python\n\n user_table_schema = generate_table_schema(name='users', data_cls=User)\n\n Note:\n The function returns a TableSchema instance containing the table_name\n and SQL data definition.\n \"\"\"\n columns = []\n name = name.replace(\" \", \"_\")\n for field_name, field_type in data_cls.__annotations__.items():\n if field_name == \"table_name\":\n pass\n else:\n columns.append(f\"{field_name} {py_to_sql_type(field_type)}\")\n if hasattr(data_cls, \"primary_key\"):\n columns.append(f\"PRIMARY KEY ({data_cls.primary_key})\")\n if hasattr(data_cls, \"unique_key\"):\n columns.append(f\"UNIQUE ({data_cls.unique_key})\")\n\n schema = TableSchema(\n name, f\"CREATE TABLE IF NOT EXISTS {name} (\\n{', '.join(columns)}\\n);\"\n )\n\n return schema" } ]
import pytest import aiosqlite import secrets from aiodesa import Db from aiodesa.utils.table import make_schema from dataclasses import dataclass, fields from pathlib import Path
4,587
@pytest.fixture def db_path(): """ DB path initializer """ return "test.sqlite3" @pytest.fixture(scope="session", autouse=True) def name(): """ DB name initializer _ is to satisfy SQL convention in case secrets returns a string with a numeric in position 0 """ return "_" + secrets.token_hex(16) def delete_test_db(db_path): """ For tearing down test """ file_name = db_path parent_folder = Path.cwd() file_path = parent_folder / file_name file_path = Path(file_path) if file_path.exists(): file_path.unlink() @pytest.fixture def test_data_fixture(name): """ Fixture for testing DB from dataclass """ @dataclass class TestData: test_column: str | None = None test_column_two: str | None = None table_name: str = name return TestData @pytest.mark.asyncio async def test_db_init(db_path): """ Tests the creation of the following class attributes self.db_path = Path(db_path) self._conn = None self._create_db() self._tables = {} """ db = Db(db_path) db_path = Path(db.db_path) assert db_path.is_file() assert db._conn == None assert isinstance(db._tables, dict) @pytest.mark.asyncio async def test_read_table_schemas_single_dataclass(test_data_fixture, db_path, name): """ Tests creation of table from single data class """ single_data_class = test_data_fixture async with Db(db_path) as db: await db.read_table_schemas(single_data_class) assert await db._table_exists(name) @pytest.mark.asyncio async def test_read_table_schemas_tuple_of_dataclasses(db_path): """ Tests creation of tables from tuple of data classes """ table_one_name = "_" + secrets.token_hex(16) table_two_name = "_" + secrets.token_hex(16) @dataclass class Dataclass1: id: int name: str table_name: str = table_one_name @dataclass class Dataclass2: id: int value: float table_name: str = table_two_name async with Db(db_path) as db: await db.read_table_schemas((Dataclass1, Dataclass2)) assert await db._table_exists(table_one_name) assert await db._table_exists(table_two_name) @pytest.mark.asyncio async def test_table_exists(db_path, test_data_fixture, name): """ Tests that the internal method _table_exists returns if tables exist or not """ async with Db(db_path) as db: assert not await db._table_exists("nonexistent_table") await db.read_table_schemas(test_data_fixture) assert await db._table_exists(name) @pytest.mark.asyncio async def test_create_table(test_data_fixture, db_path): """ Tests that _create_table actually creates the table. Test is done with raw sql, not by testing against internal class methods. """ class_fields = fields(test_data_fixture) db = Db(db_path) for field in class_fields: if field.name == "table_name":
# tests/test_database.py @pytest.fixture def db_path(): """ DB path initializer """ return "test.sqlite3" @pytest.fixture(scope="session", autouse=True) def name(): """ DB name initializer _ is to satisfy SQL convention in case secrets returns a string with a numeric in position 0 """ return "_" + secrets.token_hex(16) def delete_test_db(db_path): """ For tearing down test """ file_name = db_path parent_folder = Path.cwd() file_path = parent_folder / file_name file_path = Path(file_path) if file_path.exists(): file_path.unlink() @pytest.fixture def test_data_fixture(name): """ Fixture for testing DB from dataclass """ @dataclass class TestData: test_column: str | None = None test_column_two: str | None = None table_name: str = name return TestData @pytest.mark.asyncio async def test_db_init(db_path): """ Tests the creation of the following class attributes self.db_path = Path(db_path) self._conn = None self._create_db() self._tables = {} """ db = Db(db_path) db_path = Path(db.db_path) assert db_path.is_file() assert db._conn == None assert isinstance(db._tables, dict) @pytest.mark.asyncio async def test_read_table_schemas_single_dataclass(test_data_fixture, db_path, name): """ Tests creation of table from single data class """ single_data_class = test_data_fixture async with Db(db_path) as db: await db.read_table_schemas(single_data_class) assert await db._table_exists(name) @pytest.mark.asyncio async def test_read_table_schemas_tuple_of_dataclasses(db_path): """ Tests creation of tables from tuple of data classes """ table_one_name = "_" + secrets.token_hex(16) table_two_name = "_" + secrets.token_hex(16) @dataclass class Dataclass1: id: int name: str table_name: str = table_one_name @dataclass class Dataclass2: id: int value: float table_name: str = table_two_name async with Db(db_path) as db: await db.read_table_schemas((Dataclass1, Dataclass2)) assert await db._table_exists(table_one_name) assert await db._table_exists(table_two_name) @pytest.mark.asyncio async def test_table_exists(db_path, test_data_fixture, name): """ Tests that the internal method _table_exists returns if tables exist or not """ async with Db(db_path) as db: assert not await db._table_exists("nonexistent_table") await db.read_table_schemas(test_data_fixture) assert await db._table_exists(name) @pytest.mark.asyncio async def test_create_table(test_data_fixture, db_path): """ Tests that _create_table actually creates the table. Test is done with raw sql, not by testing against internal class methods. """ class_fields = fields(test_data_fixture) db = Db(db_path) for field in class_fields: if field.name == "table_name":
schema_ = make_schema(str(field.default), test_data_fixture)
1
2023-12-09 05:52:25+00:00
8k
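Editor's note on the row above: the test suite exercises a Db class that builds SQLite tables from dataclasses whose table_name field names the table. Neither Db nor make_schema is included in this row, so the following is only a minimal standard-library sketch of the fields()-to-CREATE-TABLE mapping those tests rely on; make_schema_sql and the literal table name are hypothetical stand-ins.

import sqlite3
from dataclasses import dataclass, fields

def make_schema_sql(datacls) -> str:
    # Hypothetical stand-in for make_schema(): the table_name field names the table,
    # every other dataclass field becomes a TEXT column.
    table_name = next(f.default for f in fields(datacls) if f.name == "table_name")
    columns = ", ".join(f"{f.name} TEXT" for f in fields(datacls) if f.name != "table_name")
    return f"CREATE TABLE IF NOT EXISTS {table_name} ({columns})"

@dataclass
class TestData:
    test_column: str | None = None
    test_column_two: str | None = None
    table_name: str = "_deadbeef"

conn = sqlite3.connect(":memory:")
conn.execute(make_schema_sql(TestData))
# Raw-SQL existence check, in the spirit of test_create_table above.
row = conn.execute(
    "SELECT name FROM sqlite_master WHERE type='table' AND name=?", ("_deadbeef",)
).fetchone()
assert row is not None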
DavidBellamy/labrador
scripts/preprocessing/pretraining_raw_data_to_labrador_jsonl.py
[ { "identifier": "mimic4_eCDFer", "path": "lab_transformers/data/tokenize_tabular_data.py", "snippet": "class mimic4_eCDFer:\n def __init__(self, ecdf_data: np.lib.npyio.NpzFile) -> None:\n \"\"\"\n Maps an iterable of lab codes and and an iterable of corresponding lab values to their probabilities on the corresponding eCDF.\n\n Parameters:\n ecdf_data: a NumPy .npz data archive containing named arrays: {itemid}_x and {itemid}_y for all itemid's in MIMIC-IV.\n {itemid}_x contains the *unique* values of the random variable (e.g. lab values).\n {itemid}_y contains the probabilities corresponding to P(X <= x) for that itemid.\n\n Note: {itemid}_x, {itemid}_y are index-aligned such that:\n ecdf_data[f\"{itemid}_y\"][i] = P(X <= ecdf_data[f\"{itemid}_x\"][i]) for all i.\n \"\"\"\n\n self.ecdf_data = ecdf_data\n self.itemids = list(set([int(itemid[:-2]) for itemid in ecdf_data.files]))\n\n def __call__(\n self,\n itemids: Union[Iterable[int], NDArray[np.int_]],\n lab_values: Union[Iterable[float], NDArray[np.float_]],\n null_token: Union[int, np.nan] = np.nan,\n ) -> NDArray[np.float_]:\n \"\"\"\n Returns Pr(X <= x) for all x in lab_values.\n i.e. maps all values in lab_values to their probabilities on the eCDF of the corresponding itemid\n\n itemids: an iterable of integer lab codes (called itemid's in MIMIC-IV).\n Missing values are not allowed because they are used to index into the eCDF database.\n lab_values: an iterable of float lab values.\n Missing values are allowed and will be mapped to null_token.\n null_token: the token to use for missing values. Default is np.nan.\n\n Returns an array of probabilities corresponding to the input lab_values.\n \"\"\"\n\n assert len(itemids) == len(\n lab_values\n ), \"itemids and lab_values must be the same length\"\n\n # Find the indices of the nearest values in the compressed eCDF cut-off points\n ixs = [\n self.find_nearest_ecdf_cutoff(itemid, labval)\n for itemid, labval in zip(itemids, lab_values)\n ]\n\n # Return the corresponding eCDF probabilities\n return np.array(\n [\n self.ecdf_data[f\"{itemid}_y\"][ix].item()\n if ix is not None\n else null_token\n for itemid, ix in zip(itemids, ixs)\n ]\n )\n\n def find_nearest_ecdf_cutoff(\n self, itemid: int, lab_value: float\n ) -> Union[int, None]:\n \"\"\"\n Finds the nearest value to `lab_value` in the eCDF for `itemid`.\n Returns the index of this nearest value or None if the lab_value is missing.\n \"\"\"\n if np.isnan(lab_value):\n idx = None\n else:\n lab_value = np.array(lab_value)\n idx = (\n np.abs(self.ecdf_data[f\"{itemid}_x\"] - lab_value.reshape(-1, 1))\n ).argmin(axis=1)\n\n return idx\n\n def __len__(self):\n return len(self.itemids)" }, { "identifier": "NpEncoder", "path": "lab_transformers/utils.py", "snippet": "class NpEncoder(json.JSONEncoder):\n \"\"\"A JSONEncoder subclass to handle Numpy integers, floats and arrays when writing JSON lines to disk.\n\n Usage: json.dumps(data, cls=NpEncoder)\n\n This function overwrites the default() method of JSONEncoder to handle additional types; specifically Numpy\n integers, floats and arrays. For all other types, the standard default() method is used for encoding.\n \"\"\"\n\n def default(\n self, obj: Union[np.integer, np.floating, np.ndarray, Any]\n ) -> Union[int, float, List[Any], Any]:\n if isinstance(obj, np.integer):\n return int(obj)\n elif isinstance(obj, np.floating):\n return float(obj)\n elif isinstance(obj, np.ndarray):\n return obj.tolist()\n else:\n return super(NpEncoder, self).default(obj)" } ]
import json import os import os.path as op import sqlite3 import sys import numpy as np import pandas as pd from itertools import groupby from typing import Dict, Tuple, Union from numpy.typing import NDArray from statsmodels.distributions import ECDF from tqdm import tqdm from lab_transformers.data.tokenize_tabular_data import mimic4_eCDFer from lab_transformers.utils import NpEncoder
3,926
test_df = df[ (df.subject_id.isin(test_patients)) & (df.itemid.isin(train_itemids)) ] return { "train_patients": train_patients, "val_patients": val_patients, "test_patients": test_patients, }, {"train_df": train_df, "val_df": val_df, "test_df": test_df} def probability_transform_values( self, splits: Dict[str, pd.DataFrame] ) -> Tuple[pd.DataFrame]: train_df = splits["train_df"] val_df = splits["val_df"] test_df = splits["test_df"] unique_itemids = train_df.itemid.unique() compressed_ecdf_data = {} for itemid in tqdm(unique_itemids, desc="Computing eCDFs"): lab_values = train_df[ ~np.isnan(train_df.valuenum) & (train_df.itemid == itemid) ]["valuenum"].values if len(lab_values) == 0: continue # Calculate the empirical CDF for the current lab test ecdf = ECDF(lab_values) # Compress the eCDF to just the unique lab values (and their probabilities) unique_ixs = [] cum_lengths = 0 for _, g in groupby(ecdf.x): group = list(g) cum_lengths += len(group) unique_ix = cum_lengths - 1 unique_ixs.append(unique_ix) # Store the resulting compressed eCDF data compressed_ecdf_data[f"{itemid}_x"] = ecdf.x[unique_ixs] compressed_ecdf_data[f"{itemid}_y"] = ecdf.y[unique_ixs] # Save the compressed eCDF values and probabilities np.savez(op.join(self.output_path, "mimic4_ecdfs.npz"), **compressed_ecdf_data) # Load the result back and use it to probability transform the validation and test data splits ecdf_data = np.load(op.join(self.output_path, "mimic4_ecdfs.npz")) eCDFer = mimic4_eCDFer(ecdf_data) # Apply the training eCDFer to data splits train_df["probs"] = eCDFer(train_df["itemid"], train_df["valuenum"]) val_df["probs"] = eCDFer(val_df["itemid"], val_df["valuenum"]) test_df["probs"] = eCDFer(test_df["itemid"], test_df["valuenum"]) return train_df, val_df, test_df def write_json_lines( self, patient_dict: Dict[str, NDArray[np.integer]], train_df: pd.DataFrame, val_df: pd.DataFrame, test_df: pd.DataFrame, test_number: Union[int, None] = None, ) -> None: train_patients = patient_dict["train_patients"] val_patients = patient_dict["val_patients"] test_patients = patient_dict["test_patients"] # Create the output paths for the 3 data splits train_jsonl_file = os.path.join( self.output_path, f"labrador_train_patients{test_number}.jsonl" ) val_jsonl_file = os.path.join( self.output_path, f"labrador_validation_patients{test_number}.jsonl" ) test_jsonl_file = os.path.join( self.output_path, f"labrador_test_patients{test_number}.jsonl" ) # Write the 3 data splits to their respective paths self.json_lines_writer(train_jsonl_file, train_patients, train_df, "training") self.json_lines_writer(val_jsonl_file, val_patients, val_df, "validation") self.json_lines_writer(test_jsonl_file, test_patients, test_df, "testing") def json_lines_writer( self, filepath: str, patient_list: NDArray[np.integer], df: pd.DataFrame, name: str, ) -> None: # Generate JSON lines and write to train_set.jsonl, val_set.jsonl, and test_set.jsonl at output_path first_line = True mode = "w" # Make an index out of subject_id for faster subsetting of the df df.set_index("subject_id", inplace=True) for patient in tqdm(patient_list, desc=f"Writing {name} JSON lines..."): temp = df.loc[df.index == patient] # Filter out patients that only have a single lab (no bag to learn context from) if len(temp) < 2: continue # skip this patient # Create individual patient JSON line patient_jsonl = { "subject_id": patient, "lab_codes": [ self.frequency_ranks[code] for code in temp.itemid.values ], "lab_values": temp.probs.fillna("<NULL>").values.tolist(), "time_deltas": 
temp.time_delta.values.tolist(), "hadm_id": temp.hadm_id.values.tolist(), "charttime": np.datetime_as_string(temp.charttime, unit="m").tolist(), } # Write it to file with open(filepath, mode=mode, encoding="utf-8") as f:
""" This script loads the MIMIC-IV labevents.csv data at data_path, and creates a JSON line for each patient that contains: subject_id, lab_codes (encoded as their frequency ranking), lab_values (after eCDF transformation), time_deltas, hospital admission ID (hadm_id), and charttime of the lab test, where time_deltas are the time in days (float) between each lab measurement. A <NULL> string is used in the place of labs without numeric values/entries. """ class MakeJSONlines: def __init__( self, raw_lab_data_file_name: str, raw_admissions_data_file_name: str, data_path: str, output_path: str, random_seed: int, train_pct: float, val_pct: float, test_pct: float, min_frequency: int, ) -> None: self.raw_labfile = raw_lab_data_file_name self.raw_admissionsfile = raw_admissions_data_file_name self.data_path = data_path self.output_path = output_path self.random_seed = random_seed self.train_pct = train_pct self.val_pct = val_pct self.test_pct = test_pct self.min_frequency = min_frequency # Create a controllabe RNG for data splitting self.rng = np.random.default_rng(self.random_seed) # Initialize attribute for holding the frequency ranks of the categorical vocabulary # This is filled in by the compute_frequency_ranks() method self.frequency_ranks = None def call(self, test_number: Union[int, None] = None) -> None: print("Loading raw data...\n") df, admissions = self.load_data() print("Filtering low frequency lab tests...\n") df = self.filter_rare_categorical(df) print("Merging in hadm_id's for each lab test...\n") df = self.merge_in_hadm_id_from_admissions(df, admissions) print("Computing frequency rankings of lab codes...\n") self.frequency_ranks = self.compute_frequency_ranks(df) print("Computing time deltas between labs...\n") df = self.compute_time_delta(df) print("Splitting data into train, validation, test...\n") patient_dict, data_dict = self.split_data(df) print("Transforming lab values into probabilities via the eCDF...\n") train_df, val_df, test_df = self.probability_transform_values(data_dict) print("Writing JSON lines to disk...\n") self.write_json_lines(patient_dict, train_df, val_df, test_df, test_number) def load_data(self) -> Tuple[pd.DataFrame, pd.DataFrame]: # Load labevents.csv (requires ~32Gb of memory) lab_data_path = os.path.join(self.data_path, self.raw_labfile) labevents = pd.read_csv( lab_data_path, dtype={ "labevent_id": int, "subject_id": int, "hadm_id": "Int64", # Pandas nullable Int type "specimen_id": int, "itemid": int, "charttime": "string", "storetime": "string", "value": object, "valuenum": float, "valueuom": "string", "ref_range_lower": float, "ref_range_upper": float, "flag": "string", "priority": "string", "comments": "string", }, ) # Subset to only the columns needed for json lines columns_needed = ["subject_id", "itemid", "valuenum", "charttime", "hadm_id"] df = labevents[columns_needed] # Load admissions.csv (will be used to merge in hadm_id's for each lab test) admissions_data_path = os.path.join(self.data_path, self.raw_admissionsfile) admissions = pd.read_csv(admissions_data_path) admissions["admittime"] = pd.to_datetime(admissions["admittime"]) admissions["dischtime"] = pd.to_datetime(admissions["dischtime"]) admissions = admissions[ ["subject_id", "hadm_id", "admittime", "dischtime", "edregtime"] ] # subset the necessary cols return df, admissions def filter_rare_categorical(self, raw_lab_data: pd.DataFrame) -> pd.DataFrame: # Filter out itemid's with insufficient frequency # Note: the first filter condition selects lab codes that have no numeric values 
but occur >= MIN_FREQUENCY times # the second filter condition selects lab codes that have numeric values, # and both the numeric values and codes occur >= MIN_FREQUENCY times filtered_lab_data = raw_lab_data.groupby("itemid").filter( lambda x: ( len(x["itemid"]) >= self.min_frequency and len(x["valuenum"].dropna()) == 0 ) or ( len(x["valuenum"].dropna()) >= self.min_frequency and len(x["itemid"]) >= self.min_frequency ) ) return filtered_lab_data def merge_in_hadm_id_from_admissions( self, df: pd.DataFrame, admissions: pd.DataFrame ) -> pd.DataFrame: # Make the db in memory (requires 90+ Gb of memory with full labevents.csv) conn = sqlite3.connect(":memory:") # write the tables df.to_sql("df", conn, index=False) admissions.to_sql("admissions", conn, index=False) qry = """select df.subject_id, itemid, valuenum, charttime, df.hadm_id labs_hadm_id, admissions.hadm_id adm_hadm_id from df left join admissions on ((charttime between case when edregtime is not null then min(edregtime, admittime) else admittime end and dischtime) and admissions.subject_id = df.subject_id)""" # Perform the SQL merge/join df = pd.read_sql_query(qry, conn) # Drop rows where both hadm_id's exist but aren't equal (only ~0.01% of rows have this) df = df[ ~( (df.labs_hadm_id != df.adm_hadm_id) & ~(df.labs_hadm_id.isnull()) & ~(df.adm_hadm_id.isnull()) ) ] # Merge the two hadm_id columns together df["hadm_id"] = df["labs_hadm_id"].fillna(df["adm_hadm_id"]) # Drop the labs_hadm_id and adm_hadm_id columns df = df.drop(["labs_hadm_id", "adm_hadm_id"], axis=1) return df def compute_frequency_ranks(self, raw_lab_data: pd.DataFrame) -> Dict[str, int]: # Next, we will determine the integer frequency rank of each lab code in the raw data # compute frequency of each unique lab code labcode_freqs = dict(raw_lab_data.itemid.value_counts()) # replace frequencies of lab codes with their integer rank (ranks start at 1) frequency_ranks = {} for i, (key, value) in enumerate(labcode_freqs.items()): frequency_ranks[key] = i + 1 # Save the map from MIMIC-IV lab codes to their frequency ranks (useful for getting descriptions of lab codes) codebook = pd.DataFrame.from_dict(frequency_ranks, orient="index").reset_index() codebook.columns = ["itemid", "frequency_rank"] d_labitems = os.path.join(self.data_path, "d_labitems.csv") labitem_descriptions = pd.read_csv( d_labitems ) # load descriptions of each lab code codebook = codebook.merge( labitem_descriptions, on="itemid" ) # merge the descriptions with the codebook filename = os.path.join(self.output_path, "labcode_codebook_labrador.csv") codebook.to_csv(filename, index=False) # save the codebook return frequency_ranks def compute_time_delta(self, df: pd.DataFrame) -> pd.DataFrame: # Convert charttime Pandas datetime (for computing time deltas later) df["charttime"] = pd.to_datetime(df["charttime"]) # Sort by subject_id and charttime (ascending) df = df.sort_values(["subject_id", "charttime"], inplace=False) # calculate time deltas (next time minus previous time) df["time_delta"] = df.charttime - df.charttime.shift(1) # correct rows at border between 2 patients (replace with 0) df.loc[(df.subject_id != df.subject_id.shift(1)), "time_delta"] = pd.Timedelta( "0 days" ) # Convert time_delta's to decimal days (e.g. 
5.35 days) df["time_delta"] = df["time_delta"].dt.total_seconds() / (60 * 60 * 24) return df def split_data( self, df: pd.DataFrame ) -> Tuple[Dict[str, NDArray[np.integer]], Dict[str, pd.DataFrame]]: # Sort patients into train/validation/test sets patient_list = df.subject_id.unique() # Shuffle the order of patients self.rng.shuffle(patient_list) train_size = int(np.floor(self.train_pct * len(patient_list))) val_size = int(np.ceil(self.val_pct * len(patient_list))) test_size = int(len(patient_list) - train_size - val_size) train_patients = patient_list[:train_size] val_patients = patient_list[train_size : train_size + val_size] test_patients = patient_list[train_size + val_size :] # Split out the training data train_df = df[df.subject_id.isin(train_patients)] # Extract the unique itemid's from the training data partition train_itemids = train_df.itemid.unique() # Split out the val/test sets if the itemid also exists in the training data val_df = df[ (df.subject_id.isin(val_patients)) & (df.itemid.isin(train_itemids)) ] test_df = df[ (df.subject_id.isin(test_patients)) & (df.itemid.isin(train_itemids)) ] return { "train_patients": train_patients, "val_patients": val_patients, "test_patients": test_patients, }, {"train_df": train_df, "val_df": val_df, "test_df": test_df} def probability_transform_values( self, splits: Dict[str, pd.DataFrame] ) -> Tuple[pd.DataFrame]: train_df = splits["train_df"] val_df = splits["val_df"] test_df = splits["test_df"] unique_itemids = train_df.itemid.unique() compressed_ecdf_data = {} for itemid in tqdm(unique_itemids, desc="Computing eCDFs"): lab_values = train_df[ ~np.isnan(train_df.valuenum) & (train_df.itemid == itemid) ]["valuenum"].values if len(lab_values) == 0: continue # Calculate the empirical CDF for the current lab test ecdf = ECDF(lab_values) # Compress the eCDF to just the unique lab values (and their probabilities) unique_ixs = [] cum_lengths = 0 for _, g in groupby(ecdf.x): group = list(g) cum_lengths += len(group) unique_ix = cum_lengths - 1 unique_ixs.append(unique_ix) # Store the resulting compressed eCDF data compressed_ecdf_data[f"{itemid}_x"] = ecdf.x[unique_ixs] compressed_ecdf_data[f"{itemid}_y"] = ecdf.y[unique_ixs] # Save the compressed eCDF values and probabilities np.savez(op.join(self.output_path, "mimic4_ecdfs.npz"), **compressed_ecdf_data) # Load the result back and use it to probability transform the validation and test data splits ecdf_data = np.load(op.join(self.output_path, "mimic4_ecdfs.npz")) eCDFer = mimic4_eCDFer(ecdf_data) # Apply the training eCDFer to data splits train_df["probs"] = eCDFer(train_df["itemid"], train_df["valuenum"]) val_df["probs"] = eCDFer(val_df["itemid"], val_df["valuenum"]) test_df["probs"] = eCDFer(test_df["itemid"], test_df["valuenum"]) return train_df, val_df, test_df def write_json_lines( self, patient_dict: Dict[str, NDArray[np.integer]], train_df: pd.DataFrame, val_df: pd.DataFrame, test_df: pd.DataFrame, test_number: Union[int, None] = None, ) -> None: train_patients = patient_dict["train_patients"] val_patients = patient_dict["val_patients"] test_patients = patient_dict["test_patients"] # Create the output paths for the 3 data splits train_jsonl_file = os.path.join( self.output_path, f"labrador_train_patients{test_number}.jsonl" ) val_jsonl_file = os.path.join( self.output_path, f"labrador_validation_patients{test_number}.jsonl" ) test_jsonl_file = os.path.join( self.output_path, f"labrador_test_patients{test_number}.jsonl" ) # Write the 3 data splits to their respective paths 
self.json_lines_writer(train_jsonl_file, train_patients, train_df, "training") self.json_lines_writer(val_jsonl_file, val_patients, val_df, "validation") self.json_lines_writer(test_jsonl_file, test_patients, test_df, "testing") def json_lines_writer( self, filepath: str, patient_list: NDArray[np.integer], df: pd.DataFrame, name: str, ) -> None: # Generate JSON lines and write to train_set.jsonl, val_set.jsonl, and test_set.jsonl at output_path first_line = True mode = "w" # Make an index out of subject_id for faster subsetting of the df df.set_index("subject_id", inplace=True) for patient in tqdm(patient_list, desc=f"Writing {name} JSON lines..."): temp = df.loc[df.index == patient] # Filter out patients that only have a single lab (no bag to learn context from) if len(temp) < 2: continue # skip this patient # Create individual patient JSON line patient_jsonl = { "subject_id": patient, "lab_codes": [ self.frequency_ranks[code] for code in temp.itemid.values ], "lab_values": temp.probs.fillna("<NULL>").values.tolist(), "time_deltas": temp.time_delta.values.tolist(), "hadm_id": temp.hadm_id.values.tolist(), "charttime": np.datetime_as_string(temp.charttime, unit="m").tolist(), } # Write it to file with open(filepath, mode=mode, encoding="utf-8") as f:
json_record = json.dumps(patient_jsonl, cls=NpEncoder)
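Editor's note: the NpEncoder shown in this row's context exists because json.dumps raises TypeError on NumPy scalars and arrays, which is exactly what json_lines_writer places into patient_jsonl before the dumps call above. A small illustration with a made-up patient record:

import json
import numpy as np

class NpEncoder(json.JSONEncoder):
    # Same idea as the repo's encoder: coerce NumPy types to plain Python before serialising.
    def default(self, obj):
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        return super().default(obj)

# Made-up record in the shape written by json_lines_writer.
patient_jsonl = {
    "subject_id": np.int64(10000032),
    "lab_codes": [3, 17, 42],
    "lab_values": [0.91, "<NULL>", 0.12],
    "time_deltas": [0.0, 0.25, 1.5],
}
print(json.dumps(patient_jsonl, cls=NpEncoder))  # works; without cls= this raises TypeError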
1
2023-12-09 20:40:17+00:00
8k
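Editor's note: probability_transform_values in the row above fits an eCDF per lab test with statsmodels and then compresses it to the last index of each run of equal values. That compression is equivalent to the following NumPy-only sketch (the example lab values are made up):

import numpy as np

def compressed_ecdf(values: np.ndarray):
    # Unique sorted values x and y = P(X <= x); mirrors the itertools.groupby compression above.
    xs = np.sort(values)
    ys = np.arange(1, len(xs) + 1) / len(xs)   # step heights of the empirical CDF
    keep = np.r_[xs[1:] != xs[:-1], True]      # keep the last occurrence of each value
    return xs[keep], ys[keep]

labs = np.array([4.2, 3.1, 4.2, 5.0, 3.1, 3.1])
x, y = compressed_ecdf(labs)
# x -> [3.1, 4.2, 5.0], y -> [0.5, 0.8333..., 1.0]

# Nearest-cutoff lookup, in the spirit of mimic4_eCDFer.find_nearest_ecdf_cutoff:
prob = y[np.abs(x - 4.0).argmin()]             # probability at the closest stored cutoff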
NLP-Core-Team/RealCode_eval
main.py
[ { "identifier": "InfillGenerator", "path": "lm_eval/generators.py", "snippet": "class InfillGenerator:\n def __init__(self, \n model_path: str,\n num_samples: int,\n prefix_tokens: tp.Union[str, tp.List[int]] = [],\n middle_tokens: tp.Union[str, tp.List[int]] = [],\n suffix_tokens: tp.Union[str, tp.List[int]] = [],\n max_context_length: int = None,\n left_context_ratio: int = 1,\n dtype = torch.bfloat16,\n eos_sequences: tp.List[str] = [\"\\sclass\\s\", \"\\sdef\\s\", \"\\s@\", \"<|endoftext|>\", \"<extra_id_0>\"],\n model_kwargs: tp.Dict = {},\n generation_params: tp.Dict[str, tp.Any] = {},\n context_parser: BaseParser = TrivialContextParser(),\n add_extra_spaces_to_generation=0,\n ):\n \"\"\"\n Class to generate code in fill-in-the-middle mode\n params:\n model_path: str - which model to use for generation, anything that can be passed to AutoModelForCausalLM.from_pretrained\n num_samples: int - number of samples to generate per task, values > 1 should be paired with generation_params\n prefix_tokens: tp.Union[str, tp.List[int]] = [] - tokens to insert before the left context. Can be either str or list of int tokens\n middle_tokens: tp.Union[str, tp.List[int]] = [] - tokens to insert before the right context (see Fill-In-the-Middle). Can be either str or list of int tokens\n suffix_tokens: tp.Union[str, tp.List[int]] = [] - tokens to insert after the right context (see Fill-In-the-Middle). Can be either str or list of int tokens\n max_context_length: int = None - truncation length for prompt, measured in tokens (len(left_context) + len(right_context) < max_context_length) \n left_context_ratio: int = 1 - proportion of max_context_length given to left_context. 1 means 1:1 split between left and right, 3 means 3:1 split in favor of left context \n dtype=torch.bfloat16 - torch dtype to use for inference\n eos_sequences: tp.List[str] = [\"\\sclass\\s\", \"\\sdef\\s\", \"\\s@\", \"<|endoftext|>\", \"<extra_id_0>\"] - regular expressions that determine end of geneartion\n model_kwargs: tp.Dict = {} - kwargs to be passed to AutoModelForCausalLM.from_pretrained\n generation_params: tp.Dict[str, tp.Any] = {} - kwargs to be passed to AutoModelForCausalLM.generate\n context_parser: BaseParser = TrivialContextParser() - parser for left and right contexts\n add_extra_spaces_to_generation=0 - number of added extra spaces add the begining of generation to fix indentation. May be required due to bugs in some tokenizers (e.g. 
Codellama)\n \"\"\"\n self.device = torch.device(\"cuda\")\n # self.device = torch.device(\"cpu\")\n logger.info(f\"Loading model from {model_path} with kwargs f{model_kwargs}\")\n self.tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)\n self.model = AutoModelForCausalLM.from_pretrained(model_path, \n torch_dtype=dtype, device_map=\"auto\", trust_remote_code=True, **model_kwargs\n ).eval() \n logger.info(f\"Loaded model from {model_path} with kwargs f{model_kwargs}\")\n logger.info(f\"Device map: \\n{self.model.hf_device_map}\")\n\n self.num_samples = num_samples\n \n self.prefix_tokens = self.tokenize_special_tokens(prefix_tokens)\n self.middle_tokens = self.tokenize_special_tokens(middle_tokens)\n self.suffix_tokens = self.tokenize_special_tokens(suffix_tokens)\n\n logger.debug(f\"prefix_tokens: {self.prefix_tokens}, middle_tokens: {self.middle_tokens}, suffix_tokens: {self.suffix_tokens}\")\n\n self.eos_sequences = eos_sequences[:]\n\n #context truncation parameters\n self.max_context_length = max_context_length\n self.left_context_truncate_at = left_context_ratio / (left_context_ratio + 1)\n self.right_context_truncate_at = 1 / (left_context_ratio + 1)\n\n self.generation_params = generation_params\n self.generation_params['num_return_sequences'] = self.num_samples\n\n self.context_parser = context_parser\n # Number of tokens before and after truncating to max_context_length\n self.count_inferenced_tokens = []\n self.count_possible_tokens = []\n self.add_extra_spaces_to_generation = add_extra_spaces_to_generation\n\n def tokenize_special_tokens(self, str_or_list: tp.Union[str, tp.List[int]]) -> torch.Tensor: \n if type(str_or_list) == str:\n return self.tokenizer.encode(str_or_list, return_tensors=\"pt\", add_special_tokens=False).to(self.device) # ['input_ids']\n else:\n return torch.as_tensor(str_or_list).unsqueeze(0).to(self.device)\n\n def _prepare_tokens(self, task: Task) -> torch.Tensor:\n left_context_str, right_context_str = self.context_parser.get_left_and_right_context(task)\n logger.info(\"\\n\" + \"\\n\".join(left_context_str.split('\\n')[-20:]))\n left_tokens = self.tokenizer.encode(\n left_context_str, return_tensors=\"pt\", add_special_tokens=False).to(self.device) # ['input_ids']\n right_tokens = self.tokenizer.encode(\n right_context_str, return_tensors=\"pt\", add_special_tokens=False).to(self.device) # ['input_ids']\n self.count_possible_tokens.append(left_tokens.shape[1] + right_tokens.shape[1])\n if self.max_context_length and left_tokens.shape[1] + right_tokens.shape[1] > self.max_context_length:\n logger.debug(\"Truncating context\")\n \n left_tokens = left_tokens[:, -min(int(self.max_context_length * self.left_context_truncate_at), left_tokens.shape[1]) + 1:]\n right_tokens = right_tokens[:, :min(int(self.max_context_length * self.right_context_truncate_at), right_tokens.shape[1]) - 1]\n tokens = torch.cat([self.prefix_tokens, left_tokens, self.middle_tokens, right_tokens, self.suffix_tokens], dim=-1).type(torch.long)\n return tokens\n \n def _postprocess(self, generation: str):\n new_gen = []\n for i, line in enumerate(generation.split('\\n')):\n if i == 0 and self.add_extra_spaces_to_generation: \n # ugly hack for codellama, weirdly removing space for skip_special_tokens=True\n line = ' '*self.add_extra_spaces_to_generation + line\n for eos in self.eos_sequences:\n if re.search(eos, line):\n return \"\\n\".join(new_gen).rstrip() + '\\n\\n'\n new_gen.append(line)\n return \"\\n\".join(new_gen).rstrip() + '\\n\\n'\n\n 
@torch.no_grad()\n def generate(self, tasks: tp.List[Task]) -> tp.List[tp.List[str]]:\n res = []\n for i, task in tqdm(enumerate(tasks)):\n tokens = self._prepare_tokens(task)\n if i == 0:\n logger.debug(f\"\\nTokens: {tokens[:, :5]} ... {tokens[:, -5:]}\\n\")\n generated_tokens = self.model.generate(tokens, **self.generation_params)\n generations = self.tokenizer.batch_decode(generated_tokens[:, tokens.shape[1]:], skip_special_tokens=True)\n if i % 1 == 0:\n logger.debug(f\"Generation for task {i}:\\n{self._postprocess(generations[0])}\")\n res.append([self._postprocess(t) for t in generations])\n self.count_inferenced_tokens.append([len(t) for t in tokens])\n return res" }, { "identifier": "LMGenerator", "path": "lm_eval/generators.py", "snippet": "class LMGenerator(InfillGenerator):\n def __init__(self, \n lm_prefix_tokens: tp.Union[str, tp.List[int]] = [],\n lm_suffix_tokens: tp.Union[str, tp.List[int]] = [],\n **kwargs\n ):\n \"\"\"\n Class to generate code in causal LM mode, uses only left context\n params:\n lm_prefix_tokens: tp.Union[str, tp.List[int]] = [] - tokens to insert before the context. Can be either str or list of int tokens\n lm_suffix_tokens: tp.Union[str, tp.List[int]] = [] - tokens to insert after the context. Can be either str or list of int tokens\n \"\"\"\n super().__init__(**kwargs)\n self.lm_prefix_tokens = super().tokenize_special_tokens(lm_prefix_tokens)\n self.lm_suffix_tokens = super().tokenize_special_tokens(lm_suffix_tokens)\n logger.debug(f\"lm_prefix_tokens: {self.lm_prefix_tokens}, lm_suffix_tokens: {self.lm_suffix_tokens}\")\n\n def _prepare_tokens(self, task: Task) -> torch.Tensor:\n left_context_str, _ = self.context_parser.get_left_and_right_context(task)\n logger.info(\"\\n\" + \"\\n\".join(left_context_str.split('\\n')[-20:]))\n left_tokens = self.tokenizer.encode(\n left_context_str, return_tensors=\"pt\", add_special_tokens=False).to(self.device) # ['input_ids']\n self.count_possible_tokens.append(left_tokens.shape[1])\n if self.max_context_length and left_tokens.shape[1] > self.max_context_length:\n left_tokens = left_tokens[:, -self.max_context_length:]\n tokens = torch.cat([self.lm_prefix_tokens, left_tokens, self.lm_suffix_tokens], dim=-1).type(torch.long)\n return tokens" }, { "identifier": "Evaluator", "path": "lm_eval/evaluator.py", "snippet": "class Evaluator:\n def __init__(self, \n dataset_root: os.PathLike,\n num_samples: int,\n pass_k_list: tp.List[int] = [1],\n njobs: int = 1,\n working_dir: tp.Optional[os.PathLike] = None,\n metric_aggregations: tp.Dict[str, tp.Callable[[Task], int]] = METRIC_AGGREGATIONS\n ):\n self.metrics = []\n for pass_k in pass_k_list:\n if num_samples < pass_k:\n raise ValueError(f\"num_samples {num_samples} must be greater than or equal to PassK={pass_k}\")\n self.metrics.append(PassK(pass_k, num_samples))\n self.dataset_root = dataset_root\n self.num_samples = num_samples\n self.njobs = njobs\n self.working_dir = working_dir\n self.metric_aggregations = metric_aggregations\n \n def evaluate(self, \n tasks: tp.List[Task],\n generations: tp.List[tp.List[str]],\n ) -> tp.Dict[tp.Literal[\"aggregated\", \"detailed\"], tp.Any]:\n logger.info(f\"Evaluating {len(tasks)} tasks with {self.num_samples} samples on {self.njobs} CPUs\")\n # Run test evaluation\n if self.njobs == 1:\n results = [\n [evaluate_override( self.dataset_root, task, gen, os.path.join(self.working_dir) ) for gen in generations[i]]\n for i, task in enumerate(tasks)\n ]\n else:\n with Manager() as manager:\n cache = manager.dict()\n with 
manager.Pool(processes=self.njobs) as pool:\n results = [[None for _2 in range(self.num_samples)] for _ in tasks]\n async_result = pool.starmap_async(\n evaluate_override_wrapped, [\n ( self.dataset_root, task, gen, os.path.join(self.working_dir, f\"{j}_{i}\"), j, i, cache )\n for j, task in enumerate(tasks) for i, gen in enumerate(generations[j])\n ]\n )\n res = async_result.get()\n for task_n, gen_n, result in res:\n results[task_n][gen_n] = result\n if task_n % 25 == 0 and gen_n == 0:\n logger.debug(result['output'])\n\n # Calculate metrics per task\n all_metric_names = ['compilation_error_rate', 'exact_match'] + [t.name() for t in self.metrics]\n metrics = []\n agg_metrics = {level: {metric_name: defaultdict(list) for metric_name in all_metric_names} for level in self.metric_aggregations}\n for task, task_results, task_generations in zip(tasks, results, generations):\n if len(task_results) != self.num_samples:\n raise ValueError(f\"Task {task} has {len(task_results)} samples, expected {self.num_samples}\")\n correct = sum([int(t['passed'] == task.total_tests) for t in task_results])\n not_compiles = mean([int(t['passed'] + t['failed'] == 0) for t in task_results])\n exact_match = mean([int(re.sub(r'\\W+', '', task.gt) == re.sub(r'\\W+', '', gen)) for gen in task_generations])\n task_metrics = {'compilation_error_rate': not_compiles, 'exact_match': exact_match}\n for metric in self.metrics:\n task_metrics[metric.name()] = metric(correct)\n task_metrics['evaluations'] = [t['output'] for t in task_results]\n metrics.append(task_metrics)\n for level, level_func in self.metric_aggregations.items():\n for metric in all_metric_names:\n agg_metrics[level][metric][level_func(task)].append(task_metrics[metric])\n \n for level in self.metric_aggregations:\n for metric_name in all_metric_names:\n means = {val: mean(agg_metrics[level][metric_name][val]) for val in agg_metrics[level][metric_name]}\n agg_metrics[level][metric_name] = means\n\n # Save metics\n metrics = agg_metrics | {\n \"detailed\": [asdict(task) | task_metric for task, task_metric in zip(tasks, metrics)]\n }\n return metrics" }, { "identifier": "TrivialContextParser", "path": "lm_eval/context_parser.py", "snippet": "class TrivialContextParser(BaseParser):\n def get_left_and_right_context(self, task: Task) -> tp.Tuple[str, str]:\n \"\"\"\n returns left and right context without processing\n \"\"\"\n return task.left_context, task.right_context" }, { "identifier": "load_dataset", "path": "lm_eval/utils.py", "snippet": "def load_dataset(root_path: os.PathLike, meta_file: str = 'dataset.json', limit: int = 10_000) -> List[Task]:\n with open(Path(root_path) / meta_file, 'r') as f:\n dataset = [Task(**t) for t in json.load(f)][:limit]\n return dataset " } ]
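Editor's note: the Evaluator snippet above counts, per task, how many of the num_samples generations pass every test ("correct") and hands that count to a PassK metric. PassK's body is not included in this row; the standard unbiased pass@k estimator it presumably computes is sketched below.

from math import comb

def pass_at_k(n: int, c: int, k: int) -> float:
    # Probability that at least one of k draws (from n samples, c of them correct) passes.
    if n - c < k:
        return 1.0
    return 1.0 - comb(n - c, k) / comb(n, k)

print(pass_at_k(n=10, c=3, k=1))  # 0.3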
import hydra import torch import numpy as np import random import json import os import logging from lm_eval.generators import InfillGenerator, LMGenerator from lm_eval.evaluator import Evaluator from lm_eval.context_parser import TrivialContextParser from lm_eval.utils import load_dataset from omegaconf import DictConfig, OmegaConf
3,770
logger = logging.getLogger("RealCode") logger.setLevel(logging.DEBUG) def seed_all(seed): random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) @hydra.main(config_path="config", config_name="config") def main(cfg: DictConfig) -> None: seed_all(cfg.seed) print(cfg) dataset = load_dataset(cfg.dataset_root, cfg.dataset_meta_file, cfg.limit) logger.info(f"loaded {cfg.dataset_root} {cfg.dataset_meta_file}") if 'context_parser' in cfg: parser = hydra.utils.instantiate(cfg.context_parser) else: parser = TrivialContextParser() dtype_map = {'fp16': torch.float16, 'fp32': torch.float, 'bf16': torch.bfloat16} if cfg.generator_mode == 'infill': generator = InfillGenerator( add_extra_spaces_to_begin=0, model_path=cfg.model_path, dtype=dtype_map[cfg.dtype], num_samples=cfg.num_samples, prefix_tokens=cfg.prefix_tokens, middle_tokens=cfg.middle_tokens, suffix_tokens=cfg.suffix_tokens, max_context_length=cfg.max_context_length, generation_params=dict(cfg.generation_params), eos_sequences=cfg.eos_sequences, model_kwargs=cfg.model_kwargs if 'model_kwargs' in cfg else {}, context_parser=parser, left_context_ratio=cfg.left_context_ratio, add_extra_spaces_to_generation=cfg.tokenizer_fix ) elif cfg.generator_mode == 'lm':
logger = logging.getLogger("RealCode") logger.setLevel(logging.DEBUG) def seed_all(seed): random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) @hydra.main(config_path="config", config_name="config") def main(cfg: DictConfig) -> None: seed_all(cfg.seed) print(cfg) dataset = load_dataset(cfg.dataset_root, cfg.dataset_meta_file, cfg.limit) logger.info(f"loaded {cfg.dataset_root} {cfg.dataset_meta_file}") if 'context_parser' in cfg: parser = hydra.utils.instantiate(cfg.context_parser) else: parser = TrivialContextParser() dtype_map = {'fp16': torch.float16, 'fp32': torch.float, 'bf16': torch.bfloat16} if cfg.generator_mode == 'infill': generator = InfillGenerator( add_extra_spaces_to_begin=0, model_path=cfg.model_path, dtype=dtype_map[cfg.dtype], num_samples=cfg.num_samples, prefix_tokens=cfg.prefix_tokens, middle_tokens=cfg.middle_tokens, suffix_tokens=cfg.suffix_tokens, max_context_length=cfg.max_context_length, generation_params=dict(cfg.generation_params), eos_sequences=cfg.eos_sequences, model_kwargs=cfg.model_kwargs if 'model_kwargs' in cfg else {}, context_parser=parser, left_context_ratio=cfg.left_context_ratio, add_extra_spaces_to_generation=cfg.tokenizer_fix ) elif cfg.generator_mode == 'lm':
generator = LMGenerator(
1
2023-12-12 12:43:06+00:00
8k
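Editor's note: InfillGenerator._prepare_tokens in the row above truncates the combined context to max_context_length, splitting the budget between left and right context according to left_context_ratio, and then concatenates prefix/left/middle/right/suffix token sequences. A simplified, tokenizer-free sketch of that split follows; plain lists stand in for token tensors, and the one-token nudges at the cut points are omitted.

def build_fim_prompt(prefix, left, middle, right, suffix,
                     max_context_length, left_context_ratio=1):
    # ratio:1 budget split between left and right context, as in _prepare_tokens.
    left_share = left_context_ratio / (left_context_ratio + 1)
    right_share = 1 / (left_context_ratio + 1)
    if len(left) + len(right) > max_context_length:
        left = left[-int(max_context_length * left_share):]    # keep the end of the left context
        right = right[:int(max_context_length * right_share)]  # keep the start of the right context
    return prefix + left + middle + right + suffix

# 3:1 split in favour of the left context, 8-token budget (made-up token ids)
tokens = build_fim_prompt([1], list(range(100, 110)), [2], list(range(200, 210)), [3],
                          max_context_length=8, left_context_ratio=3)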
ENDEVSOLS/Long-Trainer
longtrainer/trainer.py
[ { "identifier": "DocumentLoader", "path": "longtrainer/loaders.py", "snippet": "class DocumentLoader:\n def load_csv(self, path):\n \"\"\"\n Load data from a CSV file at the specified path.\n\n Args:\n path (str): The file path to the CSV file.\n\n Returns:\n The loaded CSV data.\n\n Exceptions:\n Prints an error message if the CSV loading fails.\n \"\"\"\n try:\n loader = CSVLoader(file_path=path)\n data = loader.load()\n return data\n except Exception as e:\n print(f\"Error loading CSV: {e}\")\n\n def wikipedia_query(self, search_query):\n \"\"\"\n Query Wikipedia using a given search term and return the results.\n\n Args:\n search_query (str): The search term to query on Wikipedia.\n\n Returns:\n The query results.\n\n Exceptions:\n Prints an error message if the Wikipedia query fails.\n \"\"\"\n try:\n data = WikipediaLoader(query=search_query, load_max_docs=2).load()\n return data\n except Exception as e:\n print(f\"Error querying Wikipedia: {e}\")\n\n def load_urls(self, urls):\n \"\"\"\n Load and parse content from a list of URLs.\n\n Args:\n urls (list): A list of URLs to load.\n\n Returns:\n The loaded data from the URLs.\n\n Exceptions:\n Prints an error message if loading URLs fails.\n \"\"\"\n try:\n loader = UnstructuredURLLoader(urls=urls)\n data = loader.load()\n return data\n except Exception as e:\n print(f\"Error loading URLs: {e}\")\n\n def load_YouTubeVideo(self, urls):\n \"\"\"\n Load YouTube video information from provided URLs.\n\n Args:\n urls (list): A list of YouTube video URLs.\n\n Returns:\n The loaded documents from the YouTube URLs.\n\n Exceptions:\n Prints an error message if loading YouTube videos fails.\n \"\"\"\n try:\n loader = YoutubeLoader.from_youtube_url(\n urls, add_video_info=True, language=[\"en\", \"pt\", \"zh-Hans\", \"es\", \"ur\", \"hi\"],\n translation=\"en\")\n documents = loader.load()\n return documents\n except Exception as e:\n print(f\"Error loading YouTube video: {e}\")\n\n def load_pdf(self, path):\n \"\"\"\n Load data from a PDF file at the specified path.\n\n Args:\n path (str): The file path to the PDF file.\n\n Returns:\n The loaded and split PDF pages.\n\n Exceptions:\n Prints an error message if the PDF loading fails.\n \"\"\"\n try:\n loader = PyPDFLoader(path)\n pages = loader.load_and_split()\n return pages\n except Exception as e:\n print(f\"Error loading PDF: {e}\")\n\n def load_text_from_html(self, path):\n \"\"\"\n Load and parse text content from an HTML file at the specified path.\n\n Args:\n path (str): The file path to the HTML file.\n\n Returns:\n The loaded HTML data.\n\n Exceptions:\n Prints an error message if loading text from HTML fails.\n \"\"\"\n try:\n loader = BSHTMLLoader(path)\n data = loader.load()\n return data\n except Exception as e:\n print(f\"Error loading text from HTML: {e}\")\n\n def load_markdown(self, path):\n \"\"\"\n Load data from a Markdown file at the specified path.\n\n Args:\n path (str): The file path to the Markdown file.\n\n Returns:\n The loaded Markdown data.\n\n Exceptions:\n Prints an error message if loading Markdown fails.\n \"\"\"\n try:\n loader = UnstructuredMarkdownLoader(path)\n data = loader.load()\n return data\n except Exception as e:\n print(f\"Error loading Markdown: {e}\")\n\n def load_doc(self, path):\n \"\"\"\n Load data from a DOCX file at the specified path.\n\n Args:\n path (str): The file path to the DOCX file.\n\n Returns:\n The loaded DOCX data.\n\n Exceptions:\n Prints an error message if loading DOCX fails.\n \"\"\"\n try:\n loader = Docx2txtLoader(path)\n 
data = loader.load()\n return data\n except Exception as e:\n print(f\"Error loading DOCX: {e}\")" }, { "identifier": "TextSplitter", "path": "longtrainer/loaders.py", "snippet": "class TextSplitter:\n def __init__(self, chunk_size=1024, chunk_overlap=100):\n \"\"\"\n Initialize the TextSplitter with a specific chunk size and overlap.\n\n Args:\n chunk_size (int): The size of each text chunk.\n chunk_overlap (int): The overlap size between chunks.\n \"\"\"\n self.text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)\n\n def split_documents(self, documents):\n \"\"\"\n Split the provided documents into chunks based on the chunk size and overlap.\n\n Args:\n documents (list): A list of documents to be split.\n\n Returns:\n A list of split documents.\n\n Exceptions:\n Prints an error message if splitting documents fails.\n \"\"\"\n try:\n return self.text_splitter.split_documents(documents)\n except Exception as e:\n print(f\"Error splitting documents: {e}\")" }, { "identifier": "DocRetriever", "path": "longtrainer/retrieval.py", "snippet": "class DocRetriever:\n \"\"\"\n Advanced Document Retriever integrates retrieval techniques\n to efficiently retrieve documents based on provided queries.\n \"\"\"\n\n def __init__(self, documents, embedding_model, existing_faiss_index=None, num_k=3):\n \"\"\"\n Initializes the AdvancedDocumentRetriever with a set of documents and an embedding model.\n\n Args:\n documents (list): A list of documents to be indexed and retrieved.\n embedding_model (OpenAIEmbeddings): The embedding model used for document vectorization.\n \"\"\"\n try:\n self.embedding_model = embedding_model\n self.document_collection = documents\n self.faiss_index = existing_faiss_index\n\n if not documents:\n raise ValueError(\"Document collection is empty.\")\n\n if not self.faiss_index:\n # Index documents using FAISS\n self._index_documents()\n\n # Initialize BM25 and FAISS retrievers\n self.bm25_retriever = BM25Retriever.from_documents(documents)\n\n if self.faiss_index:\n self.faiss_retriever = self.faiss_index.as_retriever(search_kwargs={\"k\": num_k})\n else:\n self.faiss_retriever = None\n\n # Create an Ensemble Retriever combining BM25 and FAISS\n self.ensemble_retriever = EnsembleRetriever(\n retrievers=[self.bm25_retriever, self.faiss_retriever],\n weights=[0.5, 0.5]\n )\n except Exception as e:\n print(f\"Initialization error in AdvancedDocumentRetriever: {e}\")\n\n def _index_documents(self):\n \"\"\"\n Indexes the provided documents into the FAISS index for efficient retrieval.\n Handles large document collections by segmenting them into smaller batches.\n \"\"\"\n if self.faiss_index is None: # Only index if there's no existing FAISS index\n try:\n if len(self.document_collection) < 2000:\n self.faiss_index = FAISS.from_documents(self.document_collection, self.embedding_model)\n else:\n self.faiss_index = FAISS.from_documents(self.document_collection[:2000], self.embedding_model)\n for i in range(2000, len(self.document_collection), 2000):\n end_index = min(i + 2000, len(self.document_collection))\n additional_index = FAISS.from_documents(self.document_collection[i:end_index], self.embedding_model)\n self.faiss_index.merge_from(additional_index)\n except Exception as e:\n print(f\"Error indexing documents: {e}\")\n\n def save_index(self, file_path):\n \"\"\"\n Saves the FAISS index to a specified file path.\n\n Args:\n file_path (str): Path where the FAISS index will be saved.\n \"\"\"\n try:\n 
self.faiss_index.save_local(file_path)\n except Exception as e:\n print(f\"Error saving FAISS index: {e}\")\n\n def update_index(self, new_documents):\n \"\"\"\n Updates the FAISS index with new documents.\n\n Args:\n new_documents (list): A list of new documents to add to the index.\n \"\"\"\n # Add this method to handle updates to the existing index\n if not self.faiss_index:\n raise ValueError(\"FAISS index not initialized.\")\n if len(new_documents) < 2000:\n new_index = FAISS.from_documents(new_documents, self.embedding_model)\n else:\n # self.faiss_index = FAISS.from_documents(self.document_collection[:2000], self.embedding_model)\n new_index = FAISS.from_documents(new_documents[:2000], self.embedding_model)\n for i in range(2000, len(new_documents), 2000):\n end_index = min(i + 2000, len(new_documents))\n additional_index = FAISS.from_documents(self.new_documents[i:end_index], self.embedding_model)\n new_index.merge_from(additional_index)\n\n new_index = FAISS.from_documents(new_documents, self.embedding_model)\n self.faiss_index.merge_from(new_index)\n\n\n def delete_index(self, file_path):\n \"\"\"\n Deletes the FAISS index directory from the specified path.\n\n Args:\n file_path (str): Path of the FAISS index directory to be deleted.\n \"\"\"\n try:\n if os.path.exists(file_path):\n if os.path.isdir(file_path):\n shutil.rmtree(file_path)\n else:\n os.remove(file_path)\n else:\n print(\"FAISS index path does not exist.\")\n except Exception as e:\n print(f\"Error deleting FAISS index path: {e}\")\n\n def retrieve_documents(self):\n \"\"\"\n Retrieves relevant documents based on the provided query using the Ensemble Retriever.\n\n Args:\n query (str): Query string for retrieving relevant documents.\n\n Returns:\n A list of documents relevant to the query.\n \"\"\"\n try:\n return self.ensemble_retriever\n except Exception as e:\n print(f\"Error retrieving documents: {e}\")" }, { "identifier": "ChainBot", "path": "longtrainer/bot.py", "snippet": "class ChainBot:\n def __init__(self, retriever, llm, prompt, token_limit):\n \"\"\"\n Initialize the ChainBot with a retriever, language model (llm), prompt,\n and an optional maximum token limit.\n\n Args:\n retriever: The document retriever object.\n llm: Language learning model for generating responses.\n prompt (str): The initial prompt to start the conversation.\n max_token_limit (int, optional): Maximum token limit for the conversation buffer. 
Defaults to 200.\n \"\"\"\n try:\n # Memory and chain setup with dynamic max token limit\n self.memory = ConversationTokenBufferMemory(\n llm=llm,\n max_token_limit=token_limit,\n memory_key=\"chat_history\",\n return_messages=True,\n output_key='answer'\n )\n\n self.chain = ConversationalRetrievalChain.from_llm(\n llm=llm,\n retriever=retriever,\n return_source_documents=True,\n chain_type='stuff', # Modify as needed\n combine_docs_chain_kwargs={\"prompt\": prompt},\n memory=self.memory,\n verbose=False,\n )\n except Exception as e:\n # Handle any exceptions that occur during initialization\n print(f\"Error initializing ChainBot: {e}\")\n\n def get_chain(self):\n \"\"\"\n Retrieve the conversational retrieval chain.\n\n Returns:\n The ConversationalRetrievalChain instance.\n \"\"\"\n return self.chain" }, { "identifier": "VisionMemory", "path": "longtrainer/vision_bot.py", "snippet": "class VisionMemory:\n def __init__(self, token_limit, ensemble_retriever=None):\n model_name='gpt-4-1106-preview'\n self.llm = ChatOpenAI(model_name=model_name)\n self.memory = ConversationTokenBufferMemory(\n llm=self.llm,\n max_token_limit=token_limit,\n memory_key=\"chat_history\",\n return_messages=True,\n output_key='answer'\n )\n self.chat_history = []\n self.prompt_template = '''\n Act as Intelligent assistant\n {context}\n Your task is to answer the query with accurate answer using the chat history context.\n If the answer is unknown, admitting ignorance is preferred over fabricating a response. Dont need to add irrelevant text explanation in response.\n\n Chat History: {chat_history}\n\n Question: {question}\n\n Answer\n '''\n self.ensemble_retriever = ensemble_retriever\n\n def save_chat_history(self, query, answer):\n self.chat_history.append([query, answer])\n self.memory.save_context({\"input\": query}, {\"answer\": answer})\n\n def generate_prompt(self, query, additional_context):\n memory_history = self.memory.load_memory_variables({})\n return self.prompt_template.format(context=f\"you will answer the query from provided context: {additional_context}\", chat_history=memory_history, question=query)\n\n def get_answer(self, query):\n docs = self.ensemble_retriever.get_relevant_documents(query)\n prompt = self.generate_prompt(query, docs)\n return prompt" }, { "identifier": "VisionBot", "path": "longtrainer/vision_bot.py", "snippet": "class VisionBot:\n def __init__(self, prompt_template, max_tokens=1024):\n model_name = \"gpt-4-vision-preview\"\n self.vision_chain = ChatOpenAI(model=model_name, max_tokens=max_tokens)\n self.prompt_template = prompt_template # Save prompt template to instance variable\n self.human_message_content = [] # Initialize as an empty list\n\n def encode_image(self, image_path):\n with open(image_path, \"rb\") as image_file:\n return base64.b64encode(image_file.read()).decode('utf-8')\n\n def create_vision_bot(self, image_files):\n for file in image_files:\n encoded_image = self.encode_image(file) # Use the encode_image function\n image_snippet = {\n \"type\": \"image_url\",\n \"image_url\": {\"url\": f\"data:image/jpeg;base64,{encoded_image}\"} # Corrected key to \"url\"\n }\n self.human_message_content.append(image_snippet)\n\n def get_response(self, query):\n\n # Create a message with the current query\n self.human_message_content.insert(0, {\"type\": \"text\", \"text\": query})\n # Uncomment and modify the invoke call\n msg = self.vision_chain.invoke(\n [AIMessage(\n content=self.prompt_template # Use self.prompt_template\n ),\n 
HumanMessage(content=self.human_message_content)\n ]\n )\n return msg.content" } ]
from longtrainer.loaders import DocumentLoader, TextSplitter from longtrainer.retrieval import DocRetriever from longtrainer.bot import ChainBot from longtrainer.vision_bot import VisionMemory, VisionBot from langchain.embeddings import OpenAIEmbeddings from langchain.chat_models import ChatOpenAI from langchain.prompts import PromptTemplate from pymongo import MongoClient import uuid
3,935
class LongTrainer: def __init__(self, mongo_endpoint='mongodb://localhost:27017/', llm=None, embedding_model=None, prompt_template=None, max_token_limit=32000): """ Initialize the LongTrainer with optional language learning model, embedding model, prompt template, maximum token limit, and MongoDB endpoint. Args: mongo_endpoint (str): MongoDB connection string. llm: Language learning model, defaults to ChatOpenAI with GPT-4. embedding_model: Embedding model for document vectorization, defaults to OpenAIEmbeddings. prompt_template: Template for generating prompts, defaults to a predefined template. max_token_limit (int): Maximum token limit for the conversational buffer. """ self.llm = llm if llm is not None else ChatOpenAI(model_name='gpt-4-1106-preview') self.embedding_model = embedding_model if embedding_model is not None else OpenAIEmbeddings() self.prompt_template = prompt_template if prompt_template is not None else self._default_prompt_template() self.prompt = PromptTemplate(template=self.prompt_template, input_variables=["context", "chat_history", "question"]) self.max_token_limit = max_token_limit self.document_loader = DocumentLoader()
class LongTrainer: def __init__(self, mongo_endpoint='mongodb://localhost:27017/', llm=None, embedding_model=None, prompt_template=None, max_token_limit=32000): """ Initialize the LongTrainer with optional language learning model, embedding model, prompt template, maximum token limit, and MongoDB endpoint. Args: mongo_endpoint (str): MongoDB connection string. llm: Language learning model, defaults to ChatOpenAI with GPT-4. embedding_model: Embedding model for document vectorization, defaults to OpenAIEmbeddings. prompt_template: Template for generating prompts, defaults to a predefined template. max_token_limit (int): Maximum token limit for the conversational buffer. """ self.llm = llm if llm is not None else ChatOpenAI(model_name='gpt-4-1106-preview') self.embedding_model = embedding_model if embedding_model is not None else OpenAIEmbeddings() self.prompt_template = prompt_template if prompt_template is not None else self._default_prompt_template() self.prompt = PromptTemplate(template=self.prompt_template, input_variables=["context", "chat_history", "question"]) self.max_token_limit = max_token_limit self.document_loader = DocumentLoader()
self.text_splitter = TextSplitter(chunk_size=1024, chunk_overlap=100)
1
2023-12-07 16:37:26+00:00
8k
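Editor's note: the DocRetriever above wraps a BM25 retriever and a FAISS retriever in LangChain's EnsembleRetriever with equal weights, which combines the individual rankings with a weighted reciprocal-rank fusion. A plain-Python sketch of that idea, with made-up document ids:

from collections import defaultdict

def weighted_rrf(rankings, weights, c=60):
    # Fuse several ranked doc-id lists with weighted reciprocal-rank fusion.
    scores = defaultdict(float)
    for ranking, weight in zip(rankings, weights):
        for rank, doc_id in enumerate(ranking):
            scores[doc_id] += weight / (c + rank + 1)
    return sorted(scores, key=scores.get, reverse=True)

bm25_hits = ["doc_3", "doc_1", "doc_7"]   # keyword ranking (made up)
faiss_hits = ["doc_1", "doc_9", "doc_3"]  # embedding ranking (made up)
print(weighted_rrf([bm25_hits, faiss_hits], weights=[0.5, 0.5]))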
pan-x-c/EE-LLM
tools/retro/main.py
[ { "identifier": "get_args", "path": "megatron/global_vars.py", "snippet": "def get_args():\n \"\"\"Return arguments.\"\"\"\n _ensure_var_is_initialized(_GLOBAL_ARGS, 'args')\n return _GLOBAL_ARGS" }, { "identifier": "initialize_megatron", "path": "megatron/initialize.py", "snippet": "def initialize_megatron(\n extra_args_provider=None,\n args_defaults={},\n ignore_unknown_args=False,\n allow_no_cuda=False,\n):\n \"\"\"Set global variables, initialize distributed, and\n set autoresume and random seeds.\n `allow_no_cuda` should not be set unless using megatron for cpu only\n data processing. In general this arg should not be set unless you know\n what you are doing.\n Returns a function to finalize distributed env initialization\n (optionally, only when args.lazy_mpu_init == True)\n \"\"\"\n if not allow_no_cuda:\n # Make sure cuda is available.\n assert torch.cuda.is_available(), \"Megatron requires CUDA.\"\n\n # Parse arguments\n args = parse_args(extra_args_provider, ignore_unknown_args)\n\n if args.use_checkpoint_args or args_defaults.get(\"use_checkpoint_args\", False):\n print(\"load checkpoint args\")\n assert args.load is not None, \"--use-checkpoints-args requires --load argument\"\n load_args_from_checkpoint(args)\n\n validate_args(args, args_defaults)\n\n # set global args, build tokenizer, and set adlr-autoresume,\n # tensorboard-writer, and timers.\n set_global_variables(args)\n\n # torch.distributed initialization\n def finish_mpu_init():\n args = get_args()\n # Pytorch distributed.\n _initialize_distributed()\n\n # Random seeds for reproducibility.\n if args.rank == 0:\n print(\"> setting random seeds to {} ...\".format(args.seed))\n _set_random_seed(args.seed, args.data_parallel_random_init)\n\n args = get_args()\n if args.lazy_mpu_init:\n # TODO is this still a necessary option?\n args.use_cpu_initialization = True\n # delayed initialization of DDP-related stuff\n # We only set basic DDP globals\n mpu.set_tensor_model_parallel_world_size(args.tensor_model_parallel_size)\n # and return function for external DDP manager\n # to call when it has DDP initialized\n mpu.set_tensor_model_parallel_rank(args.rank)\n return finish_mpu_init\n else:\n # Megatron's MPU is the master. 
Complete initialization right away.\n finish_mpu_init()\n\n # Autoresume.\n _init_autoresume()\n\n # Compile dependencies.\n _compile_dependencies()\n\n # No continuation function\n return None" }, { "identifier": "print_rank_0", "path": "megatron/utils.py", "snippet": "def print_rank_0(message):\n \"\"\"If distributed is initialized, print only on rank 0.\"\"\"\n if torch.distributed.is_initialized():\n if torch.distributed.get_rank() == 0:\n print(message, flush=True)\n else:\n print(message, flush=True)" }, { "identifier": "set_retro_args", "path": "megatron/global_vars.py", "snippet": "def set_retro_args(retro_args):\n global _GLOBAL_RETRO_ARGS\n _GLOBAL_RETRO_ARGS = retro_args" }, { "identifier": "build_db", "path": "tools/retro/db/build.py", "snippet": "def build_db():\n '''Extract token chunks from each indexed dataset.\n\n Iterate each document of each indexed dataset, extract that document's\n chunks, and save to a 'DB' (hdf5 file).\n '''\n\n # Indexed dataset info.\n indexed_dataset_infos = init_indexed_dataset_infos()\n\n # Build dbs.\n build_individual_dbs(indexed_dataset_infos)\n\n # Single-process going forward.\n if torch.distributed.get_rank() != 0:\n return\n\n # Update n_chunks & save indexed dataset infos.\n if not os.path.exists(get_indexed_dataset_infos_path()):\n update_chunk_counts(indexed_dataset_infos)\n save_indexed_dataset_infos(indexed_dataset_infos)\n indexed_dataset_infos = get_indexed_dataset_infos()\n\n # Merge dbs.\n merge_dbs(indexed_dataset_infos, \"sampled\")\n merge_dbs(indexed_dataset_infos, \"train\")\n merge_dbs(indexed_dataset_infos, \"valid\")" }, { "identifier": "add_to_index", "path": "tools/retro/index/build.py", "snippet": "def add_to_index():\n '''Add DB chunks to index.'''\n\n args = get_retro_args()\n\n # Get index.\n index = IndexFactory.get_index(args.retro_index_type)\n\n # Get text dataset.\n gpt_dataset = get_merged_train_dataset()\n text_dataset = GPTToTextDataset(gpt_dataset)\n\n # Add to index.\n output_index_path = index.add(text_dataset)\n\n return output_index_path" }, { "identifier": "build_index", "path": "tools/retro/index/build.py", "snippet": "def build_index():\n '''Build index.\n\n Building index involves sequentially running stages above:\n - Train index (on sampled training chunks).\n - Add to index (on all training chunks).\n '''\n\n # Train index.\n train_index()\n\n # Add to index.\n add_to_index()" }, { "identifier": "train_index", "path": "tools/retro/index/build.py", "snippet": "def train_index():\n '''Train index on DB chunks.'''\n\n args = get_retro_args()\n\n # Check if trained index already exists.\n if not os.path.isfile(get_empty_index_path()):\n\n # Embed training chunks.\n embed_db()\n\n # Train index on embeddings.\n train_on_embeddings()\n\n # Wait for (single-process) training to complete.\n torch.distributed.barrier()\n\n # Remove embeddings.\n if args.retro_index_delete_training_embeddings:\n remove_embeddings()" }, { "identifier": "query_pretraining_neighbors", "path": "tools/retro/query/query.py", "snippet": "def query_pretraining_neighbors():\n '''Query pretraining datasets (train & valid).'''\n\n args = get_retro_args()\n\n # Num threads.\n faiss.omp_set_num_threads(64)\n\n # Load chunk db dataset.\n print_rank_0(\"load chunk db dataset.\")\n db_dataset = get_db_merged_train_dataset()\n db_dataset.load_doc_tuples()\n\n # Load index.\n print_rank_0(\" > get index.\")\n index = get_index()\n\n # Load datasets.\n print_rank_0(\" > get dataset map.\")\n query_dataset_map = get_query_dataset_map()\n\n # 
Bert embedder.\n embedder = BertEmbedder(args.retro_bert_batch_size,\n args.retro_bert_max_chunk_length,\n args.bert_embedder_type)\n\n # Query each (i.e., train, valid, test) dataset.\n print_rank_0(\" > query.\")\n for prefix, info in query_dataset_map.items():\n print_rank_0(\" > query '%s' dataset ... %d samples.\" %\n (prefix, len(info[\"data\"])))\n query_dataset_neighbors(db_dataset, info[\"data\"],\n prefix, info[\"neighbor_dir\"],\n index, embedder)" }, { "identifier": "get_args_path", "path": "tools/retro/utils.py", "snippet": "def get_args_path(workdir):\n '''Argument copy stored within retro workdir.'''\n return os.path.join(workdir, \"args.json\")" } ]
import json import os import torch from megatron import get_args, initialize_megatron, print_rank_0 from megatron.global_vars import set_retro_args from tools.retro.db import build_db from tools.retro.index import add_to_index, build_index, train_index from tools.retro.query import query_pretraining_neighbors from tools.retro.utils import get_args_path
3,944
# Index args. group.add_argument("--retro-index-nfeats", "-f", type=int, default=1024, help="Dimension of Bert embeddings. Bert-large is " "commonly used, so this value defaults to 1024.") group.add_argument("--retro-index-type", default="faiss-par-add", choices=["faiss-base", "faiss-par-add"], help="A 'faiss-base' index is a simple, un-optimized " "wrapper around a Faiss index. A 'faiss-par-add' index " "optimizes the 'add()' method by making it multi-node " "and multi-process, but with bit-wise equivalent " "results.") group.add_argument("--retro-index-str", required=True, help="Index string used for calling " "faiss.index_factory(). For example, " "'IVF262144_HNSW32,Flat' or " "'OPQ32_256,IVF4194304_HNSW32,PQ32'.") group.add_argument("--retro-index-ntrain", type=int, required=True, help="Number of database chunks to use for training " "the index. This value must be less or equal to the " "total number of chunks in the database.") group.add_argument("--retro-index-train-load-fraction", type=float, default=1., help="Fraction of sampled chunks to use for training " "the index. Useful when our total sampled embeddings " "use too much memory; lowering the load fraction is " "less costly than re-embedding a new sampled dataset " "from scratch.") group.add_argument("--retro-index-add-load-fraction", type=float, default=1., help="Fraction of database chunks to use for adding to " "the index. Useful when our total index size would " "use too much memory; lowering the load fraction is " "less costly than re-designing our token datasets.") group.add_argument("--retro-index-no-delete-training-embeddings", action='store_false', dest="retro_index_delete_training_embeddings", help="Skip deleting training embeddings for the search " "index. Useful for debugging.") group.add_argument("--retro-index-no-delete-added-codes", action='store_false', dest="retro_index_delete_added_codes", help="Skip deleting added codes for the search " "index. Useful for debugging.") # Query args. group.add_argument("--retro-query-ef-search", type=int, default=256, help="Index ef-search parameter for HNSW during querying.") group.add_argument("--retro-query-nprobe", type=int, default=65536, help="Index nprobe parameter for IVF during querying.") group.add_argument("--retro-query-num-neighbors-query", type=int, default=200, help="Number of neighbors to retrieve when calling " "index.search().") group.add_argument("--retro-query-num-neighbors-save", type=int, default=20, help="Number of neighbors to save to disk after " "the index's returned neighbors. If longer than target " "value, neighbors truncated; and if shorter than target " "value, neighbors are padded with -1's.") # Enforce argument naming convention. for action in group._group_actions: prefix = action.dest.split("_")[0] assert prefix == "retro", \ "Retro args must be prefixed with '--retro-*', for consistent " \ "styling. Please fix '%s'." % ", ".join(action.option_strings) return parser def save_args(args): '''Save copy of args within retro workdir.''' def default_dump(obj): if isinstance(obj, torch.dtype): return str(obj) else: raise Exception("specialize for <%s>." % type(obj).__name__) if torch.distributed.get_rank() == 0: args_path = get_args_path(args.retro_workdir) with open(args_path, "w") as f: json.dump(vars(args), f, indent=4, default=default_dump) torch.distributed.barrier() if __name__ == "__main__": # Initalize Megatron. initialize_megatron(extra_args_provider=add_retro_args) # Split retro tasks. 
args = get_args() args.retro_tasks = args.retro_tasks.split(",") # Save/set retro args. os.makedirs(args.retro_workdir, exist_ok=True) save_args(args) set_retro_args(args) # Select task to run. for task in args.retro_tasks: print_rank_0("start '%s'." % task) # Run all stages. if task == "build": build_db() torch.distributed.barrier() build_index() torch.distributed.barrier() query_pretraining_neighbors() # DB (i.e., chunk db). elif task == "db-build": build_db() # Index. elif task == "index-build": build_index() # calls both train + add. elif task == "index-train":
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. """Preprocess data for Retro. Stages (see argument '--retro-tasks'): - Build chunk database (DB). - Build index (train, add). - Query pretraining neighbors. """ def add_retro_args(parser): """Retro preprocesing arguments. *Note* : Arguments prefixed with '--retro-gpt-*' or '--retro-bert-*' are included and named as such to more easily handle managing both models running at the same time. Megatron is not optimized to run two models at once, so this naming convention makes it clearer. """ group = parser.add_argument_group(title="Retro preprocessing.") # Basic args. group.add_argument("--retro-tasks", default="build", help="Comma-separated list of tasks to run. Run entire " "preprocesing pipeline by using '--retro-tasks build'. " "Alternatively, run individual stages with tasks (in " "this order) 'db-build', 'index-build', or " "'query-pretraining-neighbors'. For example, " "'--retro-tasks db-build,index-build," "query-pretraining-neighbors' is equivalent to " "'--retro-tasks build'; or the argument can contain " "a subset of these tasks. Stages must always be run " "in the correct order (listed above).") group.add_argument("--retro-block-size", type=int, default=100000, help="Number of chunks to process at a time when " "generating Bert embeddings and querying the search " "index. Partial results for each block are generally " "saved to disk in separate files.") group.add_argument("--retro-doc-block-size", type=int, default=100000, help="Number of documents to processe at time when " "processing token datasets into chunk databases. The " "partial chunk database for each block is saved into " "a separate file.") # GPT args. group.add_argument('--retro-gpt-seed', type=int, default=1234, help='Random seed used for python, numpy, ' 'pytorch, and cuda.') group.add_argument('--retro-gpt-data-path', nargs='*', required=True, help='Path to the training dataset. Accepted format:' '1) a single data path, 2) multiple datasets in the' 'form: dataset1-weight dataset1-path dataset2-weight ' 'dataset2-path ... It is used with --split when a ' 'single dataset used for all three: train, valid ' 'and test. It is exclusive to the other ' '--*-data-path args') group.add_argument('--retro-gpt-split', type=str, default='969,30,1', help='Comma-separated list of proportions for training,' ' validation, and test split. For example the split ' '`90,5,5` will use 90%% of data for training, 5%% for ' 'validation and 5%% for test.') group.add_argument('--retro-gpt-mmap-warmup', action='store_true', help='Warm up mmap files.') group.add_argument("--retro-gpt-eval-interval", type=int, required=True, help="GPT evaluation interval.") group.add_argument("--retro-gpt-eval-iters", type=int, required=True, help="GPT evaluation iterations.") group.add_argument("--retro-gpt-tokenizer-type", required=True, help="GPT tokenizer type.") group.add_argument("--retro-gpt-vocab-file", help="GPT vocab file.") group.add_argument("--retro-gpt-merge-file", help="GPT merge file.") group.add_argument("--retro-gpt-tokenizer-model", help="GPT tokenizer model file.") group.add_argument("--retro-gpt-seq-length", type=int, required=True, help="GPT sequence length.") group.add_argument("--retro-gpt-global-batch-size", type=int, required=True, help="GPT global batch size.") group.add_argument("--retro-gpt-chunk-length", type=int, default=64, help="GPT chunk length.") # Bert args. 
group.add_argument("--retro-bert-vocab-file", required=True, help="Bert vocab file.") group.add_argument("--retro-bert-tokenizer-type", required=True, help="Bert tokenizer type (for when using " "'--bert-embedder-type megatron').") group.add_argument("--retro-bert-batch-size", type=int, default=128, help="Micro-batch size for processing Bert embeddings.") group.add_argument("--retro-bert-max-chunk-length", type=int, default=256, help="Maximum sequence length for Bert embeddings. " "(Named 'chunk' here in reference to these Bert " "sequences being converted from GPT chunks.)") # Index args. group.add_argument("--retro-index-nfeats", "-f", type=int, default=1024, help="Dimension of Bert embeddings. Bert-large is " "commonly used, so this value defaults to 1024.") group.add_argument("--retro-index-type", default="faiss-par-add", choices=["faiss-base", "faiss-par-add"], help="A 'faiss-base' index is a simple, un-optimized " "wrapper around a Faiss index. A 'faiss-par-add' index " "optimizes the 'add()' method by making it multi-node " "and multi-process, but with bit-wise equivalent " "results.") group.add_argument("--retro-index-str", required=True, help="Index string used for calling " "faiss.index_factory(). For example, " "'IVF262144_HNSW32,Flat' or " "'OPQ32_256,IVF4194304_HNSW32,PQ32'.") group.add_argument("--retro-index-ntrain", type=int, required=True, help="Number of database chunks to use for training " "the index. This value must be less or equal to the " "total number of chunks in the database.") group.add_argument("--retro-index-train-load-fraction", type=float, default=1., help="Fraction of sampled chunks to use for training " "the index. Useful when our total sampled embeddings " "use too much memory; lowering the load fraction is " "less costly than re-embedding a new sampled dataset " "from scratch.") group.add_argument("--retro-index-add-load-fraction", type=float, default=1., help="Fraction of database chunks to use for adding to " "the index. Useful when our total index size would " "use too much memory; lowering the load fraction is " "less costly than re-designing our token datasets.") group.add_argument("--retro-index-no-delete-training-embeddings", action='store_false', dest="retro_index_delete_training_embeddings", help="Skip deleting training embeddings for the search " "index. Useful for debugging.") group.add_argument("--retro-index-no-delete-added-codes", action='store_false', dest="retro_index_delete_added_codes", help="Skip deleting added codes for the search " "index. Useful for debugging.") # Query args. group.add_argument("--retro-query-ef-search", type=int, default=256, help="Index ef-search parameter for HNSW during querying.") group.add_argument("--retro-query-nprobe", type=int, default=65536, help="Index nprobe parameter for IVF during querying.") group.add_argument("--retro-query-num-neighbors-query", type=int, default=200, help="Number of neighbors to retrieve when calling " "index.search().") group.add_argument("--retro-query-num-neighbors-save", type=int, default=20, help="Number of neighbors to save to disk after " "the index's returned neighbors. If longer than target " "value, neighbors truncated; and if shorter than target " "value, neighbors are padded with -1's.") # Enforce argument naming convention. for action in group._group_actions: prefix = action.dest.split("_")[0] assert prefix == "retro", \ "Retro args must be prefixed with '--retro-*', for consistent " \ "styling. Please fix '%s'." 
% ", ".join(action.option_strings) return parser def save_args(args): '''Save copy of args within retro workdir.''' def default_dump(obj): if isinstance(obj, torch.dtype): return str(obj) else: raise Exception("specialize for <%s>." % type(obj).__name__) if torch.distributed.get_rank() == 0: args_path = get_args_path(args.retro_workdir) with open(args_path, "w") as f: json.dump(vars(args), f, indent=4, default=default_dump) torch.distributed.barrier() if __name__ == "__main__": # Initalize Megatron. initialize_megatron(extra_args_provider=add_retro_args) # Split retro tasks. args = get_args() args.retro_tasks = args.retro_tasks.split(",") # Save/set retro args. os.makedirs(args.retro_workdir, exist_ok=True) save_args(args) set_retro_args(args) # Select task to run. for task in args.retro_tasks: print_rank_0("start '%s'." % task) # Run all stages. if task == "build": build_db() torch.distributed.barrier() build_index() torch.distributed.barrier() query_pretraining_neighbors() # DB (i.e., chunk db). elif task == "db-build": build_db() # Index. elif task == "index-build": build_index() # calls both train + add. elif task == "index-train":
train_index() # train only
7
2023-12-07 08:29:38+00:00
8k
mitrefireline/simharness
simharness2/analytics/harness_analytics.py
[ { "identifier": "FireSimulationAnalytics", "path": "simharness2/analytics/simulation_analytics.py", "snippet": "class FireSimulationAnalytics(SimulationAnalytics):\n \"\"\"Use `FireSimulationAnalytics` to monitor the `fire_map` within a `FireSimulation`.\n\n Attributes:\n sim: TODO\n is_benchmark: TODO\n agent_analytics: TODO\n num_agents: TODO\n df: TODO\n df_cols: TODO\n df_dtypes: TODO\n df_index: TODO\n num_sim_steps: TODO\n active: TODO\n\n TODO: Add section for anything related to the interface for subclassers.\n \"\"\"\n\n def __init__(\n self,\n sim: FireSimulation,\n agent_analytics_partial: partial,\n is_benchmark: bool = False,\n save_history: bool = False,\n log_to_file: bool = False,\n file_type: str = \"csv\",\n custom_file_name: Optional[str] = None,\n ):\n \"\"\"TODO: A brief description of what the method is and what it's used for.\n\n TODO: Add any side effects that occur when executing the method.\n TODO: Add any exceptions that are raised.\n TODO: Add any restrictions on when the method can be called.\n\n Arguments:\n sim: The `FireSimulation` object that will be tracked.\n agent_analytics_partial: A `functools.partial` object that defines the class\n that will be used to monitor and track agent (s) behavior within\n `self.sim`.\n is_benchmark: TODO\n save_data: TODO\n \"\"\"\n super().__init__(sim, agent_analytics_partial, is_benchmark)\n\n self._is_benchmark = is_benchmark\n # Indicates if data from each timestep will be stored across the entire episode.\n self.save_history = save_history\n self.data = SimulationData(is_benchmark, save_history)\n\n # Helper attributes used to control the saving of data to a file.\n self.log_to_file = log_to_file\n self.file_type = file_type\n self.custom_file_name = custom_file_name\n\n self.num_sim_steps = 0\n\n # track if there exists a benchmark simulation that has been run already, value should be False within benchmark_simulation_analytics (if self._is_benchmark == True)\n self.benchmark_exists = False\n\n def update(self, timestep: int, benchmark_data: List[np.ndarray] = []) -> None:\n \"\"\"TODO Add docstring.\"\"\"\n # FIXME (afennelly): Remove this logic after simfire updates are merged!\n # Only access `active` attribute if the sim has been updated at least once.\n if self.sim.elapsed_steps != 0:\n self.active = self.sim.active\n\n # Prepare current timestep data.\n fire_map = self.sim.fire_map\n burned_total = np.sum(fire_map == BurnStatus.BURNED)\n burning_total = np.sum(fire_map == BurnStatus.BURNING)\n unburned_total = np.sum(fire_map == BurnStatus.UNBURNED)\n burn_rate = (burned_total + burning_total) / (timestep + 1.0)\n agent_speed = 4 # FIXME: Hardcoded - this is not always true.\n\n sim_timestep_dict = {\n \"sim_step\": self.num_sim_steps,\n \"timestep\": timestep,\n \"burned\": burned_total,\n \"burning\": burning_total,\n \"unburned\": unburned_total,\n \"burn_rate\": burn_rate,\n \"size\": fire_map.size,\n }\n\n if not self.is_benchmark:\n non_mitigated_total = burned_total + burning_total + unburned_total\n sim_timestep_dict.update(\n {\n \"mitigated\": fire_map.size - non_mitigated_total,\n # FIXME: Update for MARL case.\n # \"agent_interactions\": self.agent_analytics.num_interactions_since_last_sim_step, # noqa: E501\n # \"agent_movements\": self.agent_analytics.num_movements_since_last_sim_step, # noqa: E501\n }\n )\n\n # generate the comparison metrics if the benchmark simulation exists\n if self.benchmark_exists:\n # calculate the number of simulation steps taken by the benchmark simulation\n 
bench_sim_steps = len(benchmark_data)\n\n # calculate the amount damaged in the benchmark simulation at this current simulation step within the agent(s) simulation\n bench_num_damaged = 0\n if bench_sim_steps < self.num_sim_steps:\n # condition if the benchmark simulation ended faster than the agent(s) simulation\n bench_num_damaged = int(benchmark_data[-1])\n else:\n bench_num_damaged = int(benchmark_data[self.num_sim_steps - 1])\n\n # calculate the amount undamaged in the benchmark simulation at this current simulation step within the agent(s) simulation\n bench_num_unburned = int(fire_map.size) - bench_num_damaged\n\n # calculate the burn rate in the benchmark simulation at this current simulation step within the agent(s) simulation\n bench_burn_rate = bench_num_damaged / ((int(timestep) + 1.0) * 1.0)\n\n # calculate the total amount of damaged squares at the end of the benchmark simulation\n bench_total_damaged = int(benchmark_data[len(benchmark_data) - 1])\n\n # calculate the proportion of area saved between the agent(s) simulation and the benchmark simulation at this timestep\n area_saved_prop = float(\n (bench_num_damaged * 1.0 - (int(fire_map.size) - unburned_total))\n ) / (bench_total_damaged * 1.0)\n # add threshold to area_saved_prop so that it remains at -0.01 if the agent(s) simulation has damaged more area than the benchmark simulation at the timestep\n if area_saved_prop < 0.0:\n area_saved_prop = -0.01\n\n # update the sim_timestep_dict with the comparison metrics\n sim_timestep_dict.update(\n {\n \"area_saved\": (unburned_total - bench_num_unburned),\n \"burn_rate_reduction\": (bench_burn_rate - burn_rate),\n \"bench_episode_length\": ((bench_sim_steps) * agent_speed),\n \"timesteps_saved\": (\n (bench_sim_steps - self.num_sim_steps) * agent_speed\n ), # multiplied by the agent speed\n \"area_saved_prop\": (area_saved_prop),\n }\n )\n\n # Update the dataclass that stores the simulation's behavior.\n self.data.update(sim_timestep_dict)\n\n self.num_sim_steps += 1 # increment AFTER method logic is performed (convention).\n\n def reset(self, env_is_rendering: bool = False):\n \"\"\"Reset the attributes of `FireSimulationData` to initial values.\"\"\"\n\n # NOTE: either create new object or use dataclasses.replace()\n save_history = env_is_rendering and self.save_history\n self.data = SimulationData(self._is_benchmark, save_history)\n\n # Reset attributes used to store simulation behavior across a single episode.\n self.num_sim_steps = 0\n self.active = True\n\n # If we are tracking agent behavior, reset the `agent_analytics` object.\n if self.agent_analytics:\n self.agent_analytics.reset(env_is_rendering)" }, { "identifier": "ReactiveAgent", "path": "simharness2/agents/agent.py", "snippet": "class ReactiveAgent:\n \"\"\"A simple agent that reacts to its environment.\n\n FIXME: update docstring style, using llama2 suggestion for now.\n Parameters\n ----------\n agent_id : int\n The unique ID of this agent.\n sim_id : int\n The unique ID of the simulation this agent belongs to.\n initial_position : tuple[int, int]\n The (x,y) starting position of the agent, where (0,0) is the top-left corner of\n the map and (max_x, max_y) is the bottom-right corner of the map.\n\n Properties\n ----------\n x : int\n The current X coordinate of the agent.\n y : int\n The current Y coordinate of the agent.\n row : int\n The current row number where the agent resides.\n col : int\n The current column number where the agent resides.\n latest_movement : str or None\n The last movement made by the 
agent, if applicable.\n latest_interaction : str or None\n The last interaction had by the agent, if applicable.\n mitigation_placed : bool\n Whether the agent has placed any mitigations recently.\n moved_off_map : bool\n Whether the agent has moved off the map recently.\n\n \"\"\"\n\n # NOTE: `agent_speed` ommitted, only used within `_do_one_simulation_step`\n # Attrs that should be specified on initialization\n agent_id: Any # ex: \"agent_0\", \"dozer_0\", \"handcrew_0\", \"ff_0\", etc.\n sim_id: int # should be contained within sim.agents.keys()\n initial_position: Tuple[int, int]\n\n # Attributes with default values\n latest_movement: int = None\n latest_interaction: int = None\n mitigation_placed: bool = False\n moved_off_map: bool = False\n\n def __post_init__(self):\n self._current_position = self.initial_position\n self.x, self.y = self.initial_position\n self.row, self.col = self.y, self.x\n\n @property\n def current_position(self) -> Tuple[int, int]:\n return self._current_position\n\n @current_position.setter\n def current_position(self, value: Tuple[int, int]):\n self._current_position = value\n self.x, self.y = value\n self.row, self.col = self.y, self.x\n\n @property\n def x(self) -> int:\n return self._current_position[0]\n\n @x.setter\n def x(self, value: int):\n self._current_position = (value, self.y)\n\n @property\n def y(self) -> int:\n return self._current_position[1]\n\n @y.setter\n def y(self, value: int):\n self._current_position = (self.x, value)\n\n @property\n def row(self) -> int:\n return self._current_position[1]\n\n @row.setter\n def row(self, value: int):\n self._current_position = (self.x, value)\n\n @property\n def col(self) -> int:\n return self._current_position[0]\n\n @col.setter\n def col(self, value: int):\n self._current_position = (value, self.y)\n\n def reset(self):\n self.latest_movement = None\n self.latest_interaction = None\n self.mitigation_placed = False\n self.moved_off_map = False\n self.__post_init__()\n # self.current_position = self.initial_position\n # self.reward = 0\n\n # def move(self, env: np.ndarray, direction: int) -> bool:\n # \"\"\"Moves the agent in the given direction if possible.\"\"\"\n # current_x, current_y = self.current_position\n # dx, dy = self.actions[direction]\n # next_x, next_y = current_x + dx, current_y + dy\n\n # if env[next_y][next_x] == \"_\":\n # self.current_position = (next_x, next_y)\n # return True\n # else:\n # return False" } ]
import logging from abc import ABC, abstractmethod from dataclasses import dataclass from functools import partial from typing import Any, Optional, Dict from simfire.sim.simulation import FireSimulation from simharness2.analytics.simulation_analytics import FireSimulationAnalytics from simharness2.agents import ReactiveAgent
3,829
logger = logging.getLogger("ray.rllib") class RLHarnessAnalytics(ABC): """Base class with several built in methods.""" def __init__( self, *, sim: FireSimulation, sim_analytics_partial: partial, # use_benchmark_sim: bool = False benchmark_sim: FireSimulation = None, ) -> None: """TODO: Add docstring.""" # Store objects used to track simulation data within each episode in a run. try: self.sim_analytics: FireSimulationAnalytics = sim_analytics_partial(sim=sim) if benchmark_sim: self.benchmark_sim_analytics: FireSimulationAnalytics = ( sim_analytics_partial(sim=benchmark_sim, is_benchmark=True) ) else: self.benchmark_sim_analytics: FireSimulationAnalytics = None except TypeError as e: raise e self.best_episode_performance: Optional[BestEpisodePerformance] = None @abstractmethod def update_after_one_simulation_step(self, *, timestep: int) -> None: """See subclass for docstring.""" pass @abstractmethod def update_after_one_agent_step( self, *, timestep: int, ) -> None: """See subclass for docstring.""" pass @abstractmethod def update_after_one_harness_step( self, sim_run: bool, terminated: bool, reward: float, *, timestep: int ) -> None: """See subclass for docstring.""" pass @abstractmethod def reset(self): """See subclass for docstring.""" pass class ReactiveHarnessAnalytics(RLHarnessAnalytics): """TODO Add description.""" def __init__( self, *, sim: FireSimulation, sim_analytics_partial: partial, agent_ids: set, benchmark_sim: FireSimulation = None, ) -> None: """TODO Add summary line. Arguments: sim: The underlying `FireSimulation` object that contains the agent (s) that are being trained. The agent (s) will place mitigation lines, and the simulation will spread the fire. An episode terminates when the fire is finished spreading. sim_analytics_partial: A `functools.partial` object that defines the class that willbbe used to monitor and track `self.sim`, and `self.benchmark_sim`, if the optional `benchmark_sim` is provided. The user is expected to provide the `agent_analytics_partial` keyword argument, along with a valid value. agent_ids: TODO benchmark_sim: A separate `FireSimulation` object, identical to `sim` (after initialization). No mitigation lines will be placed in this simulation, as it does not contain any agent (s). Raises: TypeError: If `sim_analytics_partial.keywords` does not contain a `agent_analytics_partial` key with value of type `functools.partial`. """ # NOTE: Below is a hacky way to specify agent ids; Fix later # Inject `agent_ids` into keywords of `agent_analytics_partial` agent_partial: partial = sim_analytics_partial.keywords["agent_analytics_partial"] agent_partial.keywords.update({"agent_ids": agent_ids}) sim_analytics_partial.keywords["agent_analytics_partial"] = agent_partial # Initialize sim_analytics object (s) and best_episode_performance attribute. super().__init__( sim=sim, sim_analytics_partial=sim_analytics_partial, benchmark_sim=benchmark_sim, ) # Define attributes that are needed/accessed within `ComprehensiveReward` class. # TODO: Address where these attributes should be stored, see # https://gitlab.mitre.org/fireline/reinforcementlearning/simharness2/-/merge_requests/6#note_1504742 if self.benchmark_sim_analytics: # track the existence of the benchmark sim to generate the comparative (ex. area saved or burn rate reduction) metrics self.sim_analytics.benchmark_exists = True # Track the latest episode reward # TODO is this the reward for the latest timestep or the latest episode? # FIXME: Decide how and where this attribute is/should be used. 
self.latest_reward = 0.0 self.episodes_total = 0 def update_after_one_agent_step( self, *, timestep: int,
"""Base AnalyticsTracker for SimHarness and BaseReward.""" logger = logging.getLogger(__name__) handler = logging.StreamHandler() handler.setFormatter( logging.Formatter("%(asctime)s\t%(levelname)s %(filename)s:%(lineno)s -- %(message)s") ) logger.addHandler(handler) logger.propagate = False logger = logging.getLogger("ray.rllib") class RLHarnessAnalytics(ABC): """Base class with several built in methods.""" def __init__( self, *, sim: FireSimulation, sim_analytics_partial: partial, # use_benchmark_sim: bool = False benchmark_sim: FireSimulation = None, ) -> None: """TODO: Add docstring.""" # Store objects used to track simulation data within each episode in a run. try: self.sim_analytics: FireSimulationAnalytics = sim_analytics_partial(sim=sim) if benchmark_sim: self.benchmark_sim_analytics: FireSimulationAnalytics = ( sim_analytics_partial(sim=benchmark_sim, is_benchmark=True) ) else: self.benchmark_sim_analytics: FireSimulationAnalytics = None except TypeError as e: raise e self.best_episode_performance: Optional[BestEpisodePerformance] = None @abstractmethod def update_after_one_simulation_step(self, *, timestep: int) -> None: """See subclass for docstring.""" pass @abstractmethod def update_after_one_agent_step( self, *, timestep: int, ) -> None: """See subclass for docstring.""" pass @abstractmethod def update_after_one_harness_step( self, sim_run: bool, terminated: bool, reward: float, *, timestep: int ) -> None: """See subclass for docstring.""" pass @abstractmethod def reset(self): """See subclass for docstring.""" pass class ReactiveHarnessAnalytics(RLHarnessAnalytics): """TODO Add description.""" def __init__( self, *, sim: FireSimulation, sim_analytics_partial: partial, agent_ids: set, benchmark_sim: FireSimulation = None, ) -> None: """TODO Add summary line. Arguments: sim: The underlying `FireSimulation` object that contains the agent (s) that are being trained. The agent (s) will place mitigation lines, and the simulation will spread the fire. An episode terminates when the fire is finished spreading. sim_analytics_partial: A `functools.partial` object that defines the class that willbbe used to monitor and track `self.sim`, and `self.benchmark_sim`, if the optional `benchmark_sim` is provided. The user is expected to provide the `agent_analytics_partial` keyword argument, along with a valid value. agent_ids: TODO benchmark_sim: A separate `FireSimulation` object, identical to `sim` (after initialization). No mitigation lines will be placed in this simulation, as it does not contain any agent (s). Raises: TypeError: If `sim_analytics_partial.keywords` does not contain a `agent_analytics_partial` key with value of type `functools.partial`. """ # NOTE: Below is a hacky way to specify agent ids; Fix later # Inject `agent_ids` into keywords of `agent_analytics_partial` agent_partial: partial = sim_analytics_partial.keywords["agent_analytics_partial"] agent_partial.keywords.update({"agent_ids": agent_ids}) sim_analytics_partial.keywords["agent_analytics_partial"] = agent_partial # Initialize sim_analytics object (s) and best_episode_performance attribute. super().__init__( sim=sim, sim_analytics_partial=sim_analytics_partial, benchmark_sim=benchmark_sim, ) # Define attributes that are needed/accessed within `ComprehensiveReward` class. 
# TODO: Address where these attributes should be stored, see # https://gitlab.mitre.org/fireline/reinforcementlearning/simharness2/-/merge_requests/6#note_1504742 if self.benchmark_sim_analytics: # track the existence of the benchmark sim to generate the comparative (ex. area saved or burn rate reduction) metrics self.sim_analytics.benchmark_exists = True # Track the latest episode reward # TODO is this the reward for the latest timestep or the latest episode? # FIXME: Decide how and where this attribute is/should be used. self.latest_reward = 0.0 self.episodes_total = 0 def update_after_one_agent_step( self, *, timestep: int,
agents: Dict[Any, ReactiveAgent],
1
2023-12-08 19:13:31+00:00
8k
racinette/querky
querky/query.py
[ { "identifier": "logger", "path": "querky/logger.py", "snippet": "" }, { "identifier": "QueryInitializationError", "path": "querky/exceptions.py", "snippet": "class QueryInitializationError(Exception):\n def __init__(self, query: Query, additional_hint: str | None = None) -> None:\n self.message = f\"{query.string_signature()}:\\n{query.sql}\"\n if additional_hint is not None:\n self.message += f\"\\n{additional_hint}\"\n super().__init__(self.message)" }, { "identifier": "ReprHelper", "path": "querky/helpers.py", "snippet": "class ReprHelper:\n def __init__(self, name: str):\n self.name = name\n\n def __repr__(self):\n return self.name" }, { "identifier": "DictGetAttr", "path": "querky/helpers.py", "snippet": "class DictGetAttr:\n def __init__(self, d: dict) -> None:\n self.__d = d\n\n def __getattr__(self, item: str):\n return self.__d[item]" }, { "identifier": "TypeKnowledge", "path": "querky/base_types.py", "snippet": "class TypeKnowledge(GetImportsMixin):\n metadata: TypeMetaData\n is_array: bool\n is_optional: bool | None\n elem_is_optional: bool | None = None\n typehint: str | None = None\n userhint: typing.Any | None = None\n required_imports: set[str] | None = None\n\n def __post_init__(self):\n self.set_userhint(self.userhint)\n\n def set_userhint(self, userhint: typing.Any):\n if userhint is None or userhint is inspect._empty:\n # пользователь не предоставил аннотацию\n return\n if userhint == typing.Optional:\n # пользователь явно указал, что этот аргумент опционален\n self.is_optional = True\n elif isinstance(userhint, str):\n # пользователь явно указал аннотацию - мы будем использовать ее прямо в таком же виде, как указано\n self.userhint = userhint\n self.typehint = self.userhint\n else:\n raise NotImplementedError(\n \"Type annotation is a live object.\\n\"\n \"It is impossible to copy safely between files.\\n\"\n \"Placing it between parenthesis, thus making it a raw string, should do the trick.\\n\"\n \"If you need to import something inside the generated file for this annotation to work, \"\n \"use `__imports__ = [<your imports as raw strings>]` in your source file.\"\n )\n\n def get_imports(self) -> set[str]:\n s = self.metadata.get_imports()\n if self.required_imports is not None:\n s.update(self.required_imports)\n return s\n\n def add_import(self, s: str) -> None:\n if self.required_imports is None:\n self.required_imports = set()\n self.required_imports.add(s)" }, { "identifier": "QuerySignature", "path": "querky/base_types.py", "snippet": "class QuerySignature:\n def __init__(self, parameters: typing.Tuple[TypeKnowledge, ...], attributes: typing.Tuple[ResultAttribute, ...]):\n self.parameters = parameters\n self.attributes = attributes" }, { "identifier": "ConnParamConfig", "path": "querky/conn_param_config.py", "snippet": "class ConnParamConfig:\n name: str\n\n def create_parameter(\n self,\n query: Query,\n parameters: typing.Sequence[Parameter],\n type_metadata: TypeMetaData\n ) -> tuple[Parameter, TypeKnowledge, int]:\n ..." 
}, { "identifier": "ParamMapper", "path": "querky/param_mapper.py", "snippet": "class ParamMapper(typing.Generic[M]):\n def __init__(self, query: Query):\n self.query = query\n self.count = 0\n self.params: typing.List[M] = []\n self.positional: typing.List[M] = []\n self.keyword: typing.Dict[str, M] = dict()\n self.defaults: dict[str, typing.Any] = dict()\n\n for index, (name, param) in zip(range(len(query.sig.parameters)), query.sig.parameters.items()):\n param: Parameter\n if param.kind in [Parameter.VAR_KEYWORD, Parameter.VAR_POSITIONAL]:\n raise TypeError(\"Neither positional nor keyword varargs are supported\")\n mapped_param = self.create_param(index, name, param)\n self.params.append(mapped_param)\n if param.kind in [Parameter.POSITIONAL_ONLY, Parameter.POSITIONAL_OR_KEYWORD]:\n self.positional.append(mapped_param)\n elif param.kind == Parameter.KEYWORD_ONLY:\n self.keyword[name] = mapped_param\n else:\n raise NotImplementedError(param.kind)\n if param.default is not inspect._empty:\n self.defaults[name] = param.default\n\n @abstractmethod\n def assign_type_knowledge(self, t: typing.Tuple[TypeKnowledge, ...]):\n \"\"\"\n Should be callin' set_type_knowledge on dem params.\n \"\"\"\n ...\n\n def mirror_arguments(self) -> str:\n arr = []\n\n for arg in self.positional:\n arr.append(arg.name)\n for kwarg in self.keyword.keys():\n arr.append(f'{kwarg}={kwarg}')\n\n return ', '.join(arr)\n\n def parametrize_query(self) -> str:\n sql = self.query.query(*self.positional, **self.keyword)\n return sql\n\n @abstractmethod\n def map_params(self, *args, **kwargs):\n ...\n\n @abstractmethod\n def create_param(self, index: int, name: str, param: Parameter) -> M:\n ..." }, { "identifier": "attr", "path": "querky/attr.py", "snippet": "class Attr:\nclass AttrProxy:\n def __init__(self, attr_proxy: AttrProxy, name: str) -> None:\n def __call__(self, annotation: typing.Optional[str] = None, *, optional: typing.Optional[bool] = None) -> str:\n def __neg__(self) -> str:\n def __pos__(self) -> str:\n def __init__(self):\n def __getattr__(self, name: str) -> Attr:\n def __getattrs__(self) -> typing.List[Attr]:" }, { "identifier": "Value", "path": "querky/result_shape.py", "snippet": "class Value(ResultShape):\n def __init__(self, query: Query, annotation: str | TypeMetaData | None = None, *, optional: bool = False):\n super().__init__(query)\n self.annotation = annotation\n self.attribute: ResultAttribute | None = None\n self.optional = optional\n\n def set_attributes(self, attrs: tuple[ResultAttribute, ...]):\n if len(attrs) != 1:\n raise TypeError(\n f\"{self.query.string_signature()}\\n\"\n f\"Query is declared to return a single attribute, but it returns: {len(attrs)}.\"\n )\n attr = attrs[0]\n if isinstance(self.annotation, TypeMetaData):\n type_knowledge = TypeKnowledge(\n metadata=self.annotation,\n is_array=False,\n is_optional=self.optional\n )\n self.attribute = ResultAttribute(\n name=attr.name,\n index=attr.index,\n type_knowledge=type_knowledge\n )\n else:\n self.attribute = attr\n type_knowledge = self.attribute.type_knowledge\n type_knowledge.typehint = self.annotation\n type_knowledge.is_optional = self.optional\n self.return_type = type_knowledge\n self.annotate()\n\n def annotate(self):\n self.query.annotation_generator.annotate(self.return_type, 'attribute')\n\n def generate_type_code(self) -> typing.List[str] | None:\n return None\n\n async def fetch(self, conn, bound_params):\n contract = self.query.module.querky.contract\n return await contract.fetch_value(conn, self.query, 
bound_params)\n\n def fetch_sync(self, conn, bound_params):\n contract = self.query.module.querky.contract\n return contract.fetch_value_sync(conn, self.query, bound_params)\n\n def get_exports(self) -> typing.Sequence[str]:\n return []" }, { "identifier": "Column", "path": "querky/result_shape.py", "snippet": "class Column(Value):\n def __init__(self, query: Query, annotation: str | TypeMetaData | None = None, *, elem_optional: bool = True):\n super().__init__(query, annotation, optional=False)\n self.elem_optional = elem_optional\n\n def set_attributes(self, attrs: typing.Tuple[ResultAttribute, ...]):\n super().set_attributes(attrs)\n type_knowledge = self.attribute.type_knowledge\n type_knowledge.is_array = True\n type_knowledge.is_optional = False\n type_knowledge.elem_is_optional = self.elem_optional\n self.query.annotation_generator.annotate(self.return_type, 'attribute')\n\n def annotate(self):\n pass\n\n async def fetch(self, conn, params):\n contract = self.query.module.querky.contract\n return await contract.fetch_column(conn, self.query, params)\n\n def fetch_sync(self, conn, params):\n contract = self.query.module.querky.contract\n return contract.fetch_column_sync(conn, self.query, params)" }, { "identifier": "Status", "path": "querky/result_shape.py", "snippet": "class Status(ResultShape):\n def get_annotation(self) -> str:\n return 'str'\n\n def generate_type_code(self) -> typing.List[str] | None:\n return None\n\n def get_imports(self) -> set[str]:\n return set()\n\n async def fetch(self, conn, bound_params):\n contract = self.query.module.querky.contract\n return await contract.fetch_status(conn, self.query, bound_params)\n\n def fetch_sync(self, conn, bound_params):\n contract = self.query.module.querky.contract\n return contract.fetch_status(conn, self.query, bound_params)\n\n def set_attributes(self, attr: typing.Tuple[ResultAttribute, ...]):\n pass\n\n def get_exports(self) -> typing.Sequence[str]:\n return []" }, { "identifier": "All", "path": "querky/result_shape.py", "snippet": "class All(One):\n def __init__(self, query: Query, typename: str | None,):\n super().__init__(query, typename, optional=False)\n self.return_type.is_optional = False\n self.return_type.is_array = True\n self.return_type.elem_is_optional = False\n self.query.annotation_generator.annotate(self.return_type, context='result_type')\n\n def annotate(self):\n pass\n\n async def fetch(self, conn, params):\n contract = self.query.module.querky.contract\n rows = await contract.fetch_all(conn, self.query, params)\n if self.ctor.row_factory:\n rows = [\n self.ctor.row_factory(row)\n for row in rows\n ]\n return rows\n\n def fetch_sync(self, conn, params):\n contract = self.query.module.querky.contract\n rows = contract.fetch_all_sync(conn, self.query, params)\n if self.ctor.row_factory:\n rows = [\n self.ctor.row_factory(row)\n for row in rows\n ]\n return rows" }, { "identifier": "One", "path": "querky/result_shape.py", "snippet": "class One(ResultShape):\n def __init__(self, query: Query, typename: str | None, *, optional: bool = True):\n super().__init__(query)\n\n if self.query.parent_query is None:\n if self.querky.type_factory is not None:\n self.ctor = self.querky.type_factory(self.query, typename)\n else:\n self.ctor = None\n else:\n # забираем конструктор типа из базового запроса\n parent_shape = self.query.parent_query.shape\n if not isinstance(parent_shape, (All, One)):\n raise ValueError(\"Invalid shape, must be a row shape\")\n\n self.ctor = parent_shape.ctor\n # копируем название типа из 
отеческого запроса\n typename = parent_shape.ctor.typename\n\n if self.ctor.shape is None:\n self.ctor.shape = self\n\n self.optional = optional\n if self.ctor is not None:\n type_meta = TypeMetaData(typename)\n else:\n type_meta = self.query.contract.get_default_record_type_metadata()\n self.return_type = TypeKnowledge(\n metadata=type_meta,\n is_optional=self.optional,\n is_array=False,\n elem_is_optional=None\n )\n self.annotate()\n\n def annotate(self):\n self.query.annotation_generator.annotate(self.return_type, context='result_type')\n\n def set_attributes(self, attrs: typing.Tuple[ResultAttribute, ...]):\n for attribute in self.query.query_signature.attributes:\n try:\n if attr_hint := self.query.attr_hints.get(attribute.name, None):\n attribute.consume_attr(attr_hint)\n self.query.annotation_generator.annotate(attribute.type_knowledge, 'attribute')\n except Exception as ex:\n raise QueryInitializationError(self.query, f\"attribute `{attribute.name}`\") from ex\n if self.ctor is not None:\n if self.ctor.attributes is None:\n self.ctor.set_attributes(attrs)\n elif self.ctor.attributes != attrs:\n raise QueryInitializationError(\n self.query,\n \"Expected the same return type signature, but the attributes are not equal:\\n\"\n f\"Expected: {self.ctor.attributes}\\n\"\n f\"Got: {attrs}\"\n )\n\n def generate_type_code(self) -> typing.List[str] | None:\n if self.ctor is not None and not self.ctor.type_code_generated:\n return self.ctor.generate_type_code()\n else:\n return None\n\n def get_imports(self) -> set[str]:\n s = super().get_imports()\n if self.ctor is not None:\n return s.union(self.ctor.get_imports())\n return s\n\n async def fetch(self, conn, params):\n contract = self.query.module.querky.contract\n row = await contract.fetch_one(conn, self.query, params)\n if self.ctor.row_factory and row is not None:\n row = self.ctor.row_factory(row)\n return row\n\n def fetch_sync(self, conn, params):\n contract = self.query.module.querky.contract\n row = contract.fetch_one_sync(conn, self.query, params)\n if self.ctor.row_factory:\n row = self.ctor.row_factory(row)\n return row\n\n def get_exports(self) -> typing.Sequence[str]:\n if self.ctor is not None:\n return [self.ctor.get_exported_name()]\n else:\n return []" }, { "identifier": "ResultShape", "path": "querky/result_shape.py", "snippet": "class ResultShape(ABC, GetImportsMixin):\n def __init__(self, query: Query) -> None:\n self.query: Query = query\n self.return_type: TypeKnowledge | None = None\n\n @property\n def querky(self):\n return self.query.querky\n\n def get_imports(self) -> set[str]:\n return self.return_type.get_imports()\n\n @abstractmethod\n def set_attributes(self, attrs: typing.Tuple[ResultAttribute, ...]):\n ...\n\n @abstractmethod\n def generate_type_code(self) -> typing.List[str] | None:\n ...\n\n def get_annotation(self) -> str:\n return self.return_type.typehint\n\n @abstractmethod\n async def fetch(self, conn, bound_params):\n ...\n\n @abstractmethod\n async def fetch_sync(self, conn, bound_params):\n ...\n\n @abstractmethod\n def get_exports(self) -> typing.Sequence[str]:\n ..." } ]
import inspect import typing from inspect import Parameter from os import path from querky.logger import logger from querky.exceptions import QueryInitializationError from querky.helpers import ReprHelper, DictGetAttr from querky.base_types import TypeKnowledge, QuerySignature from querky.conn_param_config import ConnParamConfig from querky.param_mapper import ParamMapper from querky.attr import attr as _attr_, Attr from querky.result_shape import Value, Column, Status, All, One, ResultShape from querky.module_constructor import ModuleConstructor
4,486
from __future__ import annotations if typing.TYPE_CHECKING: RS = typing.TypeVar('RS', bound='ResultShape') class Query(typing.Generic[RS]): defaults: dict[str, typing.Any] def __init__( self, func: typing.Callable, shape: typing.Callable[[Query], RS], module: ModuleConstructor, conn_param_config: ConnParamConfig, explicit_name: typing.Optional[str], parent_query: typing.Optional[Query[One | All]], kwargs: typing.Optional[typing.Dict[str, typing.Any]] ) -> None: self.parent_query: Query[One | All] | None = parent_query self.imports = set() self.kwargs = kwargs or dict() self.query = func self.name = explicit_name or func.__name__ self.conn_param_config = conn_param_config self.sig = inspect.signature(func) self.template_signature = None self.module = module self.module.queries_list.append(self) self.param_mapper: ParamMapper = self.contract.create_param_mapper(self) self.sql = self.param_mapper.parametrize_query() self.default = DictGetAttr(self.param_mapper.defaults) # side effect: attr gets populated, so we flush it self.attr_hints: dict[str, Attr] = { a.name: a for a in _attr_.__getattrs__() } module_filename = self.module.module.__file__ common = path.commonprefix([module.querky.basedir, module_filename]) self.relative_path = module_filename[len(common):] self.unique_name = f"{self.relative_path}:{self.query.__name__}" self.local_name = self.get_local_name() self.query_signature: QuerySignature | None = None self.conn_type_knowledge: TypeKnowledge | None = None self.bound_type = None self.shape: ResultShape = shape(self) if not isinstance(self.shape, (One, All)) and parent_query: raise ValueError("Only One and All queries can have a parent query.") if parent_query and not isinstance(parent_query.shape, (One, All)): raise ValueError("Parent query must be of either One or All shape.") logger.debug( "Query: %s\nSQL: %s", self.unique_name, self.sql ) @property def annotation_generator(self): return self.querky.annotation_generator @property def contract(self): return self.module.querky.contract @property def querky(self): return self.module.querky def bind_type(self, t) -> None: self.bound_type = t async def execute(self, conn, *args, **kwargs): params = self.param_mapper.map_params(*args, **kwargs) return await self.shape.fetch(conn, params) def execute_sync(self, conn, *args, **kwargs): params = self.param_mapper.map_params(*args, **kwargs) return self.shape.fetch_sync(conn, params) def _after_types_fetched(self): # типы параметров передадим мапперу self.param_mapper.assign_type_knowledge(self.query_signature.parameters) # а типы аттрибутов - результату self.shape.set_attributes(self.query_signature.attributes) async def fetch_types(self, db) -> None: try: self.query_signature = await self.contract.get_query_signature(db, self) self._after_types_fetched()
from __future__ import annotations if typing.TYPE_CHECKING: RS = typing.TypeVar('RS', bound='ResultShape') class Query(typing.Generic[RS]): defaults: dict[str, typing.Any] def __init__( self, func: typing.Callable, shape: typing.Callable[[Query], RS], module: ModuleConstructor, conn_param_config: ConnParamConfig, explicit_name: typing.Optional[str], parent_query: typing.Optional[Query[One | All]], kwargs: typing.Optional[typing.Dict[str, typing.Any]] ) -> None: self.parent_query: Query[One | All] | None = parent_query self.imports = set() self.kwargs = kwargs or dict() self.query = func self.name = explicit_name or func.__name__ self.conn_param_config = conn_param_config self.sig = inspect.signature(func) self.template_signature = None self.module = module self.module.queries_list.append(self) self.param_mapper: ParamMapper = self.contract.create_param_mapper(self) self.sql = self.param_mapper.parametrize_query() self.default = DictGetAttr(self.param_mapper.defaults) # side effect: attr gets populated, so we flush it self.attr_hints: dict[str, Attr] = { a.name: a for a in _attr_.__getattrs__() } module_filename = self.module.module.__file__ common = path.commonprefix([module.querky.basedir, module_filename]) self.relative_path = module_filename[len(common):] self.unique_name = f"{self.relative_path}:{self.query.__name__}" self.local_name = self.get_local_name() self.query_signature: QuerySignature | None = None self.conn_type_knowledge: TypeKnowledge | None = None self.bound_type = None self.shape: ResultShape = shape(self) if not isinstance(self.shape, (One, All)) and parent_query: raise ValueError("Only One and All queries can have a parent query.") if parent_query and not isinstance(parent_query.shape, (One, All)): raise ValueError("Parent query must be of either One or All shape.") logger.debug( "Query: %s\nSQL: %s", self.unique_name, self.sql ) @property def annotation_generator(self): return self.querky.annotation_generator @property def contract(self): return self.module.querky.contract @property def querky(self): return self.module.querky def bind_type(self, t) -> None: self.bound_type = t async def execute(self, conn, *args, **kwargs): params = self.param_mapper.map_params(*args, **kwargs) return await self.shape.fetch(conn, params) def execute_sync(self, conn, *args, **kwargs): params = self.param_mapper.map_params(*args, **kwargs) return self.shape.fetch_sync(conn, params) def _after_types_fetched(self): # типы параметров передадим мапперу self.param_mapper.assign_type_knowledge(self.query_signature.parameters) # а типы аттрибутов - результату self.shape.set_attributes(self.query_signature.attributes) async def fetch_types(self, db) -> None: try: self.query_signature = await self.contract.get_query_signature(db, self) self._after_types_fetched()
except QueryInitializationError:
1
2023-12-13 15:16:34+00:00
8k
javrtg/C2P
nonmin_pose/constraints/constraint_manager.py
[ { "identifier": "constraints", "path": "nonmin_pose/constraints/constraints.py", "snippet": "def assert_smaller_idxes(param1i, param2i):\n def __init__(self, name: str, block: int, block_ids: List[int]):\n def __init__(\n self,\n params: dict,\n idx_first_el: int,\n idx_first_eq: int = 0,\n drop_eqs: Optional[List[int]] = None,\n ):\n def flatten_eqs_info(self, idx_first_eq, blocks, rows, cols, drop_eqs):\n def get_eqs_info(self, params):\n def get_eqs_info(self, params):\n def get_eqs_info(self, params):\n def get_eqs_info(self, params):\n def get_eqs_info(self, params):\n def get_eqs_info(self, params):\n def get_eqs_info(self, params):\n def compute_coeffs(self, coeffs: np.ndarray, f0: np.ndarray, f1: np.ndarray):\n def get_eqs_info(self, params):\n def compute_coeffs(self, coeffs: np.ndarray, f0: np.ndarray, f1: np.ndarray):\n def aggregate_data(f0, f1):\n def get_eqs_info(self, params):\n def get_eqs_info(self, params):\n def get_eqs_info(self, params):\n def get_eqs_info(self, params):\n def get_eqs_info(self, params):\n def get_eqs_info(self, params):\n def get_eqs_info(self, params):\n def get_eqs_info(self, params):\n def compute_coeffs(self, coeffs: np.ndarray, f0: np.ndarray, f1: np.ndarray):\n def get_eqs_info(self, params):\n def compute_coeffs(self, coeffs: np.ndarray, f0: np.ndarray, f1: np.ndarray):\n def aggregate_data(f0, f1):\n def get_eqs_info(self, params):\n def compute_coeffs(self, coeffs: np.ndarray, f0: np.ndarray, f1: np.ndarray):\n def aggregate_data_1st_ineq(f0: np.ndarray, f1: np.ndarray):\n def aggregate_data_2nd_ineq(f0: np.ndarray, f1: np.ndarray):\n def get_eqs_info(self, params):\n def get_eqs_info(self, params):\n def get_eqs_info(self, params):\n def get_eqs_info(self, params):\n def get_eqs_info(self, params):\nclass Parameter:\nclass Constraint(ABC):\nclass Adjoint(Constraint):\nclass NormT(Constraint):\nclass NormQ(Constraint):\nclass NormE(Constraint):\nclass Homogenization(Constraint):\nclass CheiralityTranslationV2(Constraint):\nclass CheiralityRotation(Constraint):\nclass ManifDefLeft(Constraint):\nclass ManifDefRight(Constraint):\nclass EDefLeft(Constraint):\nclass EDefRight(Constraint):\nclass EDefLeftRight(Constraint):\nclass RightNullSpace(Constraint):\nclass LeftNullSpace(Constraint):\nclass CheiralityTranslation(Constraint):\nclass CheiralityRotationQ(Constraint):\nclass CheiralityMidpoint(Constraint):\nclass Orthogonality(Constraint):\nclass DeterminantR(Constraint):\nclass TQDefinition(Constraint):\nclass SkewTQDefinition(Constraint):\nclass ConvexHullSO3(Constraint):\n CONSTRAINT_IDX_PER_EQ: List[List[int]]\n COEFFS_PER_EQ: List[List[float]]\n CONSTRAINT_VALUES: List[float]\n EQUATION = \"adj(E) = qt^T\"\n COEFFS_PER_EQ = [\n [1.0, -1.0, -1.0],\n [1.0, -1.0, -1.0],\n [1.0, -1.0, -1.0],\n [1.0, -1.0, -1.0],\n [1.0, -1.0, -1.0],\n [1.0, -1.0, -1.0],\n [1.0, -1.0, -1.0],\n [1.0, -1.0, -1.0],\n [1.0, -1.0, -1.0],\n ]\n CONSTRAINT_VALUES = [0.0] * 9\n EQUATION = \"||t||^2 = 1\"\n COEFFS_PER_EQ = [[1.0, 1.0, 1.0]]\n CONSTRAINT_VALUES = [1.0]\n EQUATION = \"||q||^2 = 1\"\n COEFFS_PER_EQ = [[1.0, 1.0, 1.0]]\n CONSTRAINT_VALUES = [1.0]\n EQUATION = \"norm(E) = 2\"\n COEFFS_PER_EQ = [[1.0] * 9]\n CONSTRAINT_VALUES = [2.0]\n E = params[\"E\"]\n EQUATION = \"h^2 = 1\"\n COEFFS_PER_EQ = [[1.0]]\n CONSTRAINT_VALUES = [1.0]\n EQUATION = \"f0^T t01 - q^T f1 - sct^2 = 0\"\n COEFFS_PER_EQ = [[1.0] * 6 + [-1.0]]\n CONSTRAINT_VALUES = [0.0]\n CONSTRAINT_VALUES = [0.0]\n EQUATION = \"f1^T E01^T [t01] f0 - scr^2 =0\"\n COEFFS_PER_EQ = [[1.0] * 18 + [-1.0]]\n 
CONSTRAINT_VALUES = [0.0]\n EQUATION = \"E E^T = [t][t]^T\"\n COEFFS_PER_EQ = [\n [1.0, 1.0, 1.0, -1.0, -1.0],\n [1.0, 1.0, 1.0, -1.0, -1.0],\n [1.0, 1.0, 1.0, -1.0, -1.0],\n [1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0],\n ]\n CONSTRAINT_VALUES = [0.0] * 6\n EQUATION = \"E^T E = [q][q]^T\"\n COEFFS_PER_EQ = [\n [1.0, 1.0, 1.0, -1.0, -1.0],\n [1.0, 1.0, 1.0, -1.0, -1.0],\n [1.0, 1.0, 1.0, -1.0, -1.0],\n [1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0, 1.0],\n ]\n CONSTRAINT_VALUES = [0.0] * 6\n EQUATION = \"hE = [t]R\"\n COEFFS_PER_EQ = [\n [1.0, 1.0, -1.0],\n [1.0, 1.0, -1.0],\n [1.0, 1.0, -1.0],\n [1.0, -1.0, 1.0],\n [1.0, -1.0, 1.0],\n [1.0, -1.0, 1.0],\n [1.0, 1.0, -1.0],\n [1.0, 1.0, -1.0],\n [1.0, 1.0, -1.0],\n ]\n CONSTRAINT_VALUES = [0.0] * 9\n EQUATION = \"hE = R[q]\"\n COEFFS_PER_EQ = [\n [1.0, -1.0, 1.0],\n [1.0, 1.0, -1.0],\n [1.0, -1.0, 1.0],\n [1.0, -1.0, 1.0],\n [1.0, 1.0, -1.0],\n [1.0, -1.0, 1.0],\n [1.0, -1.0, 1.0],\n [1.0, 1.0, -1.0],\n [1.0, -1.0, 1.0],\n ]\n CONSTRAINT_VALUES = [0.0] * 9\n EQUATION = \"[t]R = R[q] = 0\"\n COEFFS_PER_EQ = [\n [1.0, -1.0, 1.0, -1.0],\n [1.0, -1.0, -1.0, 1.0],\n [1.0, -1.0, 1.0, -1.0],\n [-1.0, 1.0, 1.0, -1.0],\n [-1.0, 1.0, -1.0, 1.0],\n [-1.0, 1.0, 1.0, -1.0],\n [1.0, -1.0, 1.0, -1.0],\n [1.0, -1.0, -1.0, 1.0],\n [1.0, -1.0, 1.0, -1.0],\n ]\n CONSTRAINT_VALUES = [0.0] * 9\n EQUATION = \"E q = 0\"\n COEFFS_PER_EQ = [\n [1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0],\n ]\n CONSTRAINT_VALUES = [0.0] * 3\n EQUATION = \"E^T t = 0\"\n COEFFS_PER_EQ = [\n [1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0],\n [1.0, 1.0, 1.0],\n ]\n CONSTRAINT_VALUES = [0.0] * 3\n EQUATION = \"f0^T R01 q - t01^T R01 f1 - s1^2 = 0\"\n COEFFS_PER_EQ = [[1.0] * 18 + [-1.0]]\n CONSTRAINT_VALUES = [0.0]\n EQUATION = \"f0^T E01 [q] f1 + scr^2 = 0\"\n COEFFS_PER_EQ = [[1.0] * 19]\n CONSTRAINT_VALUES = [0.0]\n EQUATION = \"f0^T R f1 - t^T R f1 - scm1^2 = 0, f0^T R q - f1^T q - scm2^2 = 0\"\n COEFFS_PER_EQ = [[1.0] * 27 + [-1.0], [1.0] * 27 + [-1.0]]\n CONSTRAINT_VALUES = [0.0, 0.0]\n EQUATION = \"R R.T = I, R.T R = I\"\n COEFFS_PER_EQ = [[1.0, 1.0, 1.0]] * 11\n CONSTRAINT_VALUES = [1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0]\n R = params[\"R\"]\n EQUATION = \"hR = cofactor(R)\"\n COEFFS_PER_EQ = [[1.0, -1.0, 1.0]] * 9\n CONSTRAINT_VALUES = [0.0] * 9\n EQUATION = \"ht - Rq = 0; hq - R^Tt = 0\"\n COEFFS_PER_EQ = [[1.0, -1.0, -1.0, -1.0]] * 6\n CONSTRAINT_VALUES = [0.0] * 6\n EQUATION = \"h[t] - ER^T, h[q] - R^T E\"\n COEFFS_PER_EQ = [\n [-1.0, -1.0, -1.0],\n [-1.0, -1.0, -1.0, -1.0],\n [1.0, -1.0, -1.0, -1.0],\n [1.0, -1.0, -1.0, -1.0],\n [-1.0, -1.0, -1.0],\n [-1.0, -1.0, -1.0, -1.0],\n [-1.0, -1.0, -1.0, -1.0],\n [1.0, -1.0, -1.0, -1.0],\n [-1.0, -1.0, -1.0],\n [-1.0, -1.0, -1.0],\n [-1.0, -1.0, -1.0, -1.0],\n [1.0, -1.0, -1.0, -1.0],\n [1.0, -1.0, -1.0, -1.0],\n [-1.0, -1.0, -1.0],\n [-1.0, -1.0, -1.0, -1.0],\n [-1.0, -1.0, -1.0, -1.0],\n [1.0, -1.0, -1.0, -1.0],\n [-1.0, -1.0, -1.0],\n ]\n CONSTRAINT_VALUES = [0.0] * 18\n EQUATION = \"conv SO(3)\"\n COEFFS_PER_EQ = [\n [1.0, -1.0, -1.0, -1.0],\n [1.0, -1.0, 1.0],\n [1.0, -1.0, 1.0],\n [1.0, -1.0, 1.0],\n [1.0, -1.0, 1.0, 1.0],\n [1.0, -1.0, -1.0],\n [1.0, -1.0, -1.0],\n [1.0, 1.0, -1.0, 1.0],\n [1.0, -1.0, -1.0],\n [1.0, 1.0, 1.0, -1.0],\n ]\n CONSTRAINT_VALUES = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 1.0]" }, { "identifier": "Parameter", "path": "nonmin_pose/constraints/constraints.py", "snippet": "class Parameter:\n \"\"\"Class for defining a parameter.\n\n Attributes:\n name: e.g. 
E, R, t, etc. This MUST match the name being used on the constraints.\n block: 1-based index of the block.\n block_ids: 1-based index of each parameter element in the block.\n \"\"\"\n\n __slots__ = (\"name\", \"block\", \"block_ids\")\n\n def __init__(self, name: str, block: int, block_ids: List[int]):\n assert block > 0, \"block must be positive\"\n assert all(idx > 0 for idx in block_ids), \"block_id must be positive\"\n\n self.name = name\n self.block = block\n self.block_ids = block_ids" } ]
from collections import defaultdict from typing import Dict, List, Optional, Union from nonmin_pose.constraints import constraints as cnt from nonmin_pose.constraints.constraints import Parameter import numpy as np
3,909
ConstraintConfig = Union[Dict[str, Optional[List[int]]], Dict[str, None]] class ConstraintManager: """Manager of the metadata of constraints (blocks, values, indexes, etc.).""" CONSTRAINT_CLASSES = {
ConstraintConfig = Union[Dict[str, Optional[List[int]]], Dict[str, None]] class ConstraintManager: """Manager of the metadata of constraints (blocks, values, indexes, etc.).""" CONSTRAINT_CLASSES = {
"manif_def_left": cnt.ManifDefLeft,
1
2023-12-10 18:25:10+00:00
8k
Jack24658735/FedLGT
models/CTran.py
[ { "identifier": "SelfAttnLayer", "path": "models/transformer_layers.py", "snippet": "class SelfAttnLayer(nn.Module):\n def __init__(self, d_model, nhead = 4,dropout=0.1):\n super().__init__()\n self.transformer_layer = TransformerEncoderLayer(d_model, nhead, d_model*1, dropout=dropout, activation='relu')\n # self.transformer_layer = nn.TransformerEncoderLayer(d_model, nhead, d_model, dropout=dropout, activation='gelu') \n\n def forward(self,k,mask=None):\n attn = None\n k=k.transpose(0,1) \n x,attn = self.transformer_layer(k,src_mask=mask)\n # x = self.transformer_layer(k,src_mask=mask)\n x=x.transpose(0,1)\n return x,attn" }, { "identifier": "Backbone", "path": "models/backbone.py", "snippet": "class Backbone(nn.Module):\n def __init__(self):\n super(Backbone, self).__init__()\n embedding_dim = 512\n self.freeze_base = False\n self.freeze_base4 = False\n\n # self.base_network = models.resnet101(pretrained=True)\n self.base_network = models.resnet18(pretrained=True)\n # self.base_network = models.resnet50(pretrained=True)\n\n # self.base_network.avgpool = nn.AvgPool2d(kernel_size=7,stride=1,padding=0) # replace avg pool\n # self.base_network.avgpool = nn.AvgPool2d(2,stride=2) # replace avg pool\n\n if self.freeze_base:\n for param in self.base_network.parameters():\n param.requires_grad = False\n elif self.freeze_base4:\n for p in self.base_network.layer4.parameters(): \n p.requires_grad=True\n\n def forward(self,images):\n x = self.base_network.conv1(images)\n x = self.base_network.bn1(x)\n x = self.base_network.relu(x)\n x = self.base_network.maxpool(x)\n x = self.base_network.layer1(x)\n x = self.base_network.layer2(x)\n x = self.base_network.layer3(x)\n x = self.base_network.layer4(x)\n # x = self.base_network.avgpool(x)\n \n return x" }, { "identifier": "BackboneCLIP", "path": "models/backbone.py", "snippet": "class BackboneCLIP(nn.Module):\n def __init__(self, model=None):\n super(BackboneCLIP, self).__init__()\n # self.base_network = models.resnet101(pretrained=True)\n # self.base_network = models.resnet18(pretrained=True)\n # model, _ = clip.load(\"RN50\")\n # print()\n model, _ = clip.load(\"ViT-B/16\", device='cuda')\n self.base_network = model.visual\n for param in self.base_network.parameters():\n param.requires_grad = False\n # self.base_network.avgpool = nn.AvgPool2d(kernel_size=7,stride=1,padding=0) # replace avg pool\n # self.base_network.avgpool = nn.AvgPool2d(2,stride=2) # replace avg pool\n\n # print(self.base_network)\n # if self.freeze_base:\n # for param in self.base_network.parameters():\n # param.requires_grad = False\n # elif self.freeze_base4:\n # for p in self.base_network.layer4.parameters(): \n # p.requires_grad=True\n\n def forward(self, x: torch.Tensor):\n x = self.base_network.conv1(x) # shape = [*, width, grid, grid]\n x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]\n x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]\n x = torch.cat([self.base_network.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width]\n x = x + self.base_network.positional_embedding.to(x.dtype)\n x = self.base_network.ln_pre(x)\n\n x = x.permute(1, 0, 2) # NLD -> LND\n x = self.base_network.transformer(x)\n x = x.permute(1, 0, 2) # LND -> NLD\n\n # x = self.base_network.ln_post(x[:, :, :])\n x = self.base_network.ln_post(x[:, 0, :])\n\n if self.base_network.proj is not None:\n x = x @ self.base_network.proj\n\n return x" }, { "identifier": "custom_replace", 
"path": "models/utils.py", "snippet": "def custom_replace(tensor,on_neg_1,on_zero,on_one):\n res = tensor.clone()\n res[tensor==-1] = on_neg_1\n res[tensor==0] = on_zero\n res[tensor==1] = on_one\n return res" }, { "identifier": "weights_init", "path": "models/utils.py", "snippet": "def weights_init(module):\n \"\"\" Initialize the weights \"\"\"\n if isinstance(module, (nn.Linear, nn.Embedding)):\n stdv = 1. / math.sqrt(module.weight.size(1))\n module.weight.data.uniform_(-stdv, stdv)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.uniform_(-stdv, stdv)\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)" }, { "identifier": "PositionEmbeddingSine", "path": "models/position_enc.py", "snippet": "class PositionEmbeddingSine(nn.Module):\n \"\"\"\n This is a more standard version of the position embedding, very similar to the one\n used by the Attention is all you need paper, generalized to work on images.\n \"\"\"\n def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None):\n super().__init__()\n self.num_pos_feats = num_pos_feats\n self.temperature = temperature\n self.normalize = normalize\n if scale is not None and normalize is False:\n raise ValueError(\"normalize should be True if scale is passed\")\n if scale is None:\n scale = 2 * math.pi\n self.scale = scale\n\n def forward(self, x, mask):\n # x = tensor_list.tensors\n # mask = tensor_list.mask\n assert mask is not None\n not_mask = ~mask\n # stop()\n y_embed = not_mask.cumsum(1)#, dtype=torch.float32)\n x_embed = not_mask.cumsum(2)#, dtype=torch.float32)\n if self.normalize:\n eps = 1e-6\n y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale\n x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale\n\n dim_t = torch.arange(self.num_pos_feats, device=x.device)#, dtype=torch.float32)\n dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)\n\n pos_x = x_embed[:, :, :, None] / dim_t\n pos_y = y_embed[:, :, :, None] / dim_t\n # stop()\n \n pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)\n pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)\n pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)\n return pos" }, { "identifier": "positionalencoding2d", "path": "models/position_enc.py", "snippet": "def positionalencoding2d(d_model, height, width):\n \"\"\"\n :param d_model: dimension of the model\n :param height: height of the positions\n :param width: width of the positions\n :return: d_model*height*width position matrix\n \"\"\"\n if d_model % 4 != 0:\n raise ValueError(\"Cannot use sin/cos positional encoding with \"\n \"odd dimension (got dim={:d})\".format(d_model))\n pe = torch.zeros(d_model, height, width)\n # Each dimension use half of d_model\n d_model = int(d_model / 2)\n div_term = torch.exp(torch.arange(0., d_model, 2) *\n -(math.log(10000.0) / d_model))\n pos_w = torch.arange(0., width).unsqueeze(1)\n pos_h = torch.arange(0., height).unsqueeze(1)\n pe[0:d_model:2, :, :] = torch.sin(pos_w * div_term).transpose(0, 1).unsqueeze(1).repeat(1, height, 1)\n pe[1:d_model:2, :, :] = torch.cos(pos_w * div_term).transpose(0, 1).unsqueeze(1).repeat(1, height, 1)\n pe[d_model::2, :, :] = torch.sin(pos_h * div_term).transpose(0, 1).unsqueeze(2).repeat(1, 1, width)\n pe[d_model + 1::2, :, :] = torch.cos(pos_h * div_term).transpose(0, 1).unsqueeze(2).repeat(1, 1, width)\n\n return pe" }, { "identifier": "MLDecoder", 
"path": "models/ml_decoder.py", "snippet": "class MLDecoder(nn.Module):\n def __init__(self, num_classes, num_of_groups=-1, decoder_embedding=768,\n initial_num_features=2048, zsl=0):\n super(MLDecoder, self).__init__()\n embed_len_decoder = 100 if num_of_groups < 0 else num_of_groups\n if embed_len_decoder > num_classes:\n embed_len_decoder = num_classes\n\n # switching to 768 initial embeddings\n decoder_embedding = 768 if decoder_embedding < 0 else decoder_embedding\n embed_standart = nn.Linear(initial_num_features, decoder_embedding)\n\n # non-learnable queries\n if not zsl:\n query_embed = nn.Embedding(embed_len_decoder, decoder_embedding)\n query_embed.requires_grad_(False)\n else:\n query_embed = None\n\n # decoder\n decoder_dropout = 0.1\n num_layers_decoder = 1\n dim_feedforward = 2048\n layer_decode = TransformerDecoderLayerOptimal(d_model=decoder_embedding,\n dim_feedforward=dim_feedforward, dropout=decoder_dropout)\n self.decoder = nn.TransformerDecoder(layer_decode, num_layers=num_layers_decoder)\n self.decoder.embed_standart = embed_standart\n self.decoder.query_embed = query_embed\n self.zsl = zsl\n\n if self.zsl:\n if decoder_embedding != 300:\n self.wordvec_proj = nn.Linear(300, decoder_embedding)\n else:\n self.wordvec_proj = nn.Identity()\n self.decoder.duplicate_pooling = torch.nn.Parameter(torch.Tensor(decoder_embedding, 1))\n self.decoder.duplicate_pooling_bias = torch.nn.Parameter(torch.Tensor(1))\n self.decoder.duplicate_factor = 1\n else:\n # group fully-connected\n self.decoder.num_classes = num_classes\n self.decoder.duplicate_factor = int(num_classes / embed_len_decoder + 0.999)\n self.decoder.duplicate_pooling = torch.nn.Parameter(\n torch.Tensor(embed_len_decoder, decoder_embedding, self.decoder.duplicate_factor))\n self.decoder.duplicate_pooling_bias = torch.nn.Parameter(torch.Tensor(num_classes))\n torch.nn.init.xavier_normal_(self.decoder.duplicate_pooling)\n torch.nn.init.constant_(self.decoder.duplicate_pooling_bias, 0)\n self.decoder.group_fc = GroupFC(embed_len_decoder)\n self.train_wordvecs = None\n self.test_wordvecs = None\n\n def forward(self, x, q):\n if len(x.shape) == 4: # [bs,2048, 7,7]\n embedding_spatial = x.flatten(2).transpose(1, 2)\n else: # [bs, 197,468]\n embedding_spatial = x\n embedding_spatial_786 = self.decoder.embed_standart(embedding_spatial)\n embedding_spatial_786 = torch.nn.functional.relu(embedding_spatial_786, inplace=True)\n\n bs = embedding_spatial_786.shape[0]\n if self.zsl:\n query_embed = torch.nn.functional.relu(self.wordvec_proj(self.decoder.query_embed))\n else:\n # query_embed = self.decoder.query_embed.weight\n query_embed = q\n # tgt = query_embed.unsqueeze(1).repeat(1, bs, 1)\n # tgt = query_embed.unsqueeze(1).expand(-1, 1, -1) # no allocation of memory with expand\n # print(query_embed.shape)\n tgt = query_embed.transpose(1,0).float()\n h = self.decoder(tgt, embedding_spatial_786.transpose(0, 1)) # [embed_len_decoder, batch, 768]\n h = h.transpose(0, 1)\n\n out_extrap = torch.zeros(h.shape[0], h.shape[1], self.decoder.duplicate_factor, device=h.device, dtype=h.dtype)\n self.decoder.group_fc(h, self.decoder.duplicate_pooling, out_extrap)\n if not self.zsl:\n h_out = out_extrap.flatten(1)[:, :self.decoder.num_classes]\n else:\n h_out = out_extrap.flatten(1)\n h_out += self.decoder.duplicate_pooling_bias\n logits = h_out\n return logits" } ]
import torch import torch.nn as nn import torch.nn.functional as F import numpy as np from .transformer_layers import SelfAttnLayer from .backbone import Backbone, BackboneCLIP from .utils import custom_replace,weights_init from .position_enc import PositionEmbeddingSine,positionalencoding2d from .ml_decoder import MLDecoder
3,637
class CTranModel(nn.Module): def __init__(self,num_labels,use_lmt,pos_emb=False,layers=3,heads=4,dropout=0.1,int_loss=0,no_x_features=False, state_weight=None, label_weight=None): super(CTranModel, self).__init__() self.use_lmt = use_lmt self.no_x_features = no_x_features # (for no image features) # ResNet backbone self.backbone = Backbone() # self.backbone_c = BackboneCLIP() hidden = 512 # this should match the backbone output feature size self.downsample = False if self.downsample: self.conv_downsample = torch.nn.Conv2d(hidden,hidden,(1,1)) # Label Embeddings self.label_input = torch.Tensor(np.arange(num_labels)).view(1,-1).long() self.label_lt = torch.nn.Embedding(num_labels, hidden, padding_idx=None) self.clip_label_lt = nn.Embedding.from_pretrained(label_weight, freeze=True, padding_idx=None) # State Embeddings self.known_label_lt = nn.Embedding.from_pretrained(state_weight, freeze=True, padding_idx=0) # self.known_label_lt = torch.nn.Embedding(3, hidden, padding_idx=0) # Position Embeddings (for image features) self.use_pos_enc = pos_emb if self.use_pos_enc: # self.position_encoding = PositionEmbeddingSine(int(hidden/2), normalize=True) self.position_encoding = positionalencoding2d(hidden, 18, 18).unsqueeze(0) # Transformer
class CTranModel(nn.Module): def __init__(self,num_labels,use_lmt,pos_emb=False,layers=3,heads=4,dropout=0.1,int_loss=0,no_x_features=False, state_weight=None, label_weight=None): super(CTranModel, self).__init__() self.use_lmt = use_lmt self.no_x_features = no_x_features # (for no image features) # ResNet backbone self.backbone = Backbone() # self.backbone_c = BackboneCLIP() hidden = 512 # this should match the backbone output feature size self.downsample = False if self.downsample: self.conv_downsample = torch.nn.Conv2d(hidden,hidden,(1,1)) # Label Embeddings self.label_input = torch.Tensor(np.arange(num_labels)).view(1,-1).long() self.label_lt = torch.nn.Embedding(num_labels, hidden, padding_idx=None) self.clip_label_lt = nn.Embedding.from_pretrained(label_weight, freeze=True, padding_idx=None) # State Embeddings self.known_label_lt = nn.Embedding.from_pretrained(state_weight, freeze=True, padding_idx=0) # self.known_label_lt = torch.nn.Embedding(3, hidden, padding_idx=0) # Position Embeddings (for image features) self.use_pos_enc = pos_emb if self.use_pos_enc: # self.position_encoding = PositionEmbeddingSine(int(hidden/2), normalize=True) self.position_encoding = positionalencoding2d(hidden, 18, 18).unsqueeze(0) # Transformer
self.self_attn_layers = nn.ModuleList([SelfAttnLayer(hidden,heads,dropout) for _ in range(layers)])
0
2023-12-09 09:16:59+00:00
8k
AgriCodeHub/dairy-django-backend
health/serializers.py
[ { "identifier": "Cow", "path": "core/models.py", "snippet": "class Cow(models.Model):\n \"\"\"\n Represents an individual cow in the dairy farm.\n\n Attributes:\n - `name` (str): The name of the cow.\n - `breed` (CowBreed): The breed of the cow.\n - `date_of_birth` (date): The birthdate of the cow.\n - `gender` (str): The gender of the cow.\n - `availability_status` (str): The availability status of the cow.\n - `sire` (Cow or None): The sire (father) of the cow.\n - `dam` (Cow or None): The dam (mother) of the cow.\n - `current_pregnancy_status` (str): The current pregnancy status of the cow.\n - `category` (str): The category of the cow.\n - `current_production_status` (str): The current production status of the cow.\n - `date_introduced_in_farm` (date): The date the cow was introduced to the farm.\n - `is_bought` (bool): Indicates whether the cow was bought or not.\n - `date_of_death` (date or None): The date of death of the cow, if applicable.\n \"\"\"\n\n name = models.CharField(max_length=35)\n breed = models.ForeignKey(CowBreed, on_delete=models.PROTECT, related_name=\"cows\")\n date_of_birth = models.DateField()\n gender = models.CharField(max_length=6, choices=SexChoices.choices)\n availability_status = models.CharField(\n choices=CowAvailabilityChoices.choices,\n default=CowAvailabilityChoices.ALIVE,\n max_length=5,\n )\n current_pregnancy_status = models.CharField(\n choices=CowPregnancyChoices.choices,\n default=CowPregnancyChoices.UNAVAILABLE,\n max_length=12,\n )\n category = models.CharField(\n choices=CowCategoryChoices.choices,\n default=CowCategoryChoices.CALF,\n max_length=11,\n )\n current_production_status = models.CharField(\n choices=CowProductionStatusChoices.choices,\n max_length=22,\n default=CowProductionStatusChoices.CALF,\n )\n is_bought = models.BooleanField(default=False)\n sire = models.ForeignKey(\n \"self\", on_delete=models.SET_NULL, null=True, related_name=\"offspring\"\n )\n dam = models.ForeignKey(\n \"self\", on_delete=models.SET_NULL, null=True, related_name=\"calves\"\n )\n date_introduced_in_farm = models.DateField(auto_now=True)\n\n date_of_death = models.DateField(null=True)\n\n objects = CowManager()\n\n @property\n def tag_number(self):\n \"\"\"\n Returns the tag number of the cow.\n \"\"\"\n return Cow.objects.get_tag_number(self)\n\n @property\n def age(self):\n \"\"\"\n Calculates and returns the age of the cow in days.\n \"\"\"\n return Cow.objects.calculate_age(self)\n\n @property\n def age_in_farm(self):\n \"\"\"\n Calculates and returns the age of the cow in days since introduction to the farm.\n \"\"\"\n return Cow.objects.calculate_age_in_farm(self)\n\n @property\n def parity(self):\n \"\"\"\n Calculates and returns the parity of the cow.\n \"\"\"\n return Cow.objects.calculate_parity(self)\n\n @property\n def calf_records(self):\n return Cow.objects.get_calf_records(self)\n\n def clean(self):\n \"\"\"\n Performs validation checks before saving the cow.\n\n Raises:\n - `ValidationError`: If cow validation fails.\n\n \"\"\"\n\n if self.pk:\n CowValidator.validate_production_status_2(\n self.current_production_status,\n self.gender,\n self.category,\n self.age,\n self.calf_records,\n self.is_bought,\n self,\n )\n CowValidator.validate_age_category(\n self.age,\n self.category,\n self.gender,\n self.calf_records,\n self.is_bought,\n self,\n )\n else:\n CowValidator.validate_pregnancy_status(\n self,\n self.age,\n self.current_pregnancy_status,\n self.availability_status,\n self.gender,\n )\n CowValidator.validate_uniqueness(self.name)\n 
CowValidator.validate_cow_age(self.age, self.date_of_birth)\n CowValidator.validate_gender_update(self.pk, self.gender)\n CowValidator.validate_sire_dam_relationship(self.sire, self.dam)\n CowValidator.validate_production_status_1(\n self.current_production_status,\n self.gender,\n self.age,\n )\n CowValidator.validate_pregnancy_status(\n self,\n self.age,\n self.current_pregnancy_status,\n self.availability_status,\n self.gender,\n )\n\n CowValidator.validate_date_of_death(\n self.availability_status, self.date_of_death\n )\n\n def __str__(self):\n \"\"\"\n Returns a string representation of the cow.\n \"\"\"\n return self.tag_number\n\n def save(self, *args, **kwargs):\n \"\"\"\n Overrides the save method to ensure validation before saving.\n \"\"\"\n self.clean()\n super().save(*args, **kwargs)" }, { "identifier": "DiseaseCategory", "path": "health/models.py", "snippet": "class DiseaseCategory(models.Model):\n \"\"\"\n Represents a category of diseases affecting cows.\n\n Attributes:\n - `name` (str): The name of the disease category, chosen from predefined choices.\n\n Methods:\n - `clean`: Validates the name of the disease category.\n \"\"\"\n\n name = models.CharField(\n max_length=15, choices=DiseaseCategoryChoices.choices, unique=True\n )\n\n def clean(self):\n \"\"\"\n Validate the name of the disease category.\n \"\"\"\n DiseaseCategoryValidator.validate_name(self.name)\n\n def __str__(self):\n return self.name\n\n def save(self, *args, **kwargs):\n \"\"\"\n Overrides the save method to perform additional validation before saving.\n \"\"\"\n self.clean()\n super().save(*args, **kwargs)" }, { "identifier": "WeightRecord", "path": "health/models.py", "snippet": "class WeightRecord(models.Model):\n \"\"\"\n Represents a weight record for a cow.\n\n Attributes:\n - `cow` (Cow): The cow associated with the weight record.\n - `weight_in_kgs` (Decimal): The weight of the cow in kilograms.\n - `date_taken` (Date): The date when the weight record was taken.\n\n Methods:\n - `__str__`: Returns a string representation of the weight record.\n - `clean`: Performs validation checks before saving the weight record.\n - `save`: Overrides the save method to ensure validation before saving.\n\n Raises:\n - `ValidationError`: If weight record validation fails.\n \"\"\"\n\n cow = models.ForeignKey(Cow, on_delete=models.CASCADE)\n weight_in_kgs = models.DecimalField(max_digits=6, decimal_places=2)\n date_taken = models.DateField(auto_now_add=True)\n\n def __str__(self):\n \"\"\"\n Returns a string representation of the weight record.\n \"\"\"\n return (\n f\"{self.cow} - Weight: {self.weight_in_kgs} kgs - Date: {self.date_taken}\"\n )\n\n def clean(self):\n \"\"\"\n Performs validation checks before saving the weight record.\n\n Raises:\n - `ValidationError`: If weight record validation fails.\n \"\"\"\n WeightRecordValidator.validate_weight(self.weight_in_kgs)\n WeightRecordValidator.validate_cow_availability_status(self.cow)\n WeightRecordValidator.validate_frequency_of_weight_records(\n self.date_taken, self.cow\n )\n\n def save(self, *args, **kwargs):\n \"\"\"\n Overrides the save method to ensure validation before saving.\n \"\"\"\n self.clean()\n super().save(*args, **kwargs)" }, { "identifier": "CullingRecord", "path": "health/models.py", "snippet": "class CullingRecord(models.Model):\n \"\"\"\n Represents a culling record for a cow.\n\n Attributes:\n - `cow` (Cow): The cow associated with the culling record.\n - `reason` (str): The reason for culling, chosen from predefined choices.\n - 
`notes` (str): Additional notes or comments about the culling.\n - `date_carried` (Date): The date when the culling record was created.\n\n Methods:\n - `__str__`: Returns a string representation of the culling record.\n \"\"\"\n\n cow = models.OneToOneField(\n Cow, on_delete=models.CASCADE, related_name=\"culling_record\"\n )\n reason = models.CharField(max_length=35, choices=CullingReasonChoices.choices)\n notes = models.TextField(null=True, max_length=100)\n date_carried = models.DateField(auto_now_add=True)\n\n def __str__(self):\n \"\"\"\n Returns a string representation of the culling record.\n \"\"\"\n return f\"CullingRecord for {self.cow} - Reason: {self.reason} - Date: {self.date_carried}\"" }, { "identifier": "QuarantineRecord", "path": "health/models.py", "snippet": "class QuarantineRecord(models.Model):\n \"\"\"\n Represents a quarantine record for a cow.\n\n Attributes:\n - `cow` (Cow): The cow associated with the quarantine record.\n - `reason` (str): The reason for quarantine, chosen from predefined choices.\n - `start_date` (Date): The start date of the quarantine period.\n - `end_date` (Date): The end date of the quarantine period (optional).\n - `notes` (str): Additional notes or comments about the quarantine.\n\n Methods:\n - `__str__`: Returns a string representation of the quarantine record.\n - `clean`: Validates the reason for quarantine and the date range.\n - `save`: Overrides the save method to perform additional validation before saving.\n \"\"\"\n\n class Meta:\n get_latest_by = \"-start_date\"\n\n cow = models.ForeignKey(\n Cow, on_delete=models.CASCADE, related_name=\"quarantine_records\"\n )\n reason = models.CharField(max_length=35, choices=QuarantineReasonChoices.choices)\n start_date = models.DateField(auto_now_add=True)\n end_date = models.DateField(null=True)\n notes = models.TextField(null=True, max_length=100)\n\n def __str__(self):\n if self.end_date:\n return f\"Quarantine Record of {self.cow.tag_number} from {self.start_date} to {self.end_date}\"\n return f\"Quarantine Record of {self.cow.tag_number} from {self.start_date}\"\n\n def clean(self):\n \"\"\"\n Validate the reason for quarantine and the date range for start and end dates.\n \"\"\"\n # Validate the reason for quarantine\n QuarantineValidator.validate_reason(self.reason, self.cow)\n\n # Validate the date range for start and end dates\n QuarantineValidator.validate_date(self.start_date, self.end_date)\n\n def save(self, *args, **kwargs):\n \"\"\"\n Overrides the save method to perform additional validation before saving.\n \"\"\"\n self.clean()\n super().save(*args, **kwargs)" }, { "identifier": "Pathogen", "path": "health/models.py", "snippet": "class Pathogen(models.Model):\n \"\"\"\n Represents a pathogen affecting a cow.\n\n Attributes:\n - `name` (str): The type of pathogen, chosen from predefined choices.\n\n Methods:\n - `clean`: Validates the name of the pathogen.\n \"\"\"\n\n name = models.CharField(max_length=10, choices=PathogenChoices.choices, unique=True)\n # diagnosis_date = models.DateField(auto_now_add=True)\n\n def clean(self):\n \"\"\"\n Validate the name of the pathogen.\n \"\"\"\n PathogenValidator.validate_name(self.name)\n\n def save(self, *args, **kwargs):\n \"\"\"\n Overrides the save method to perform additional validation before saving.\n \"\"\"\n self.clean()\n super().save(*args, **kwargs)" }, { "identifier": "Symptoms", "path": "health/models.py", "snippet": "class Symptoms(models.Model):\n \"\"\"\n Represents symptoms reported in cows.\n\n Attributes:\n - 
`name` (str): The name of the symptom.\n - `symptom_type` (str): The type of the symptom, chosen from predefined choices.\n - `description` (str): Description of the symptom (nullable).\n - `date_observed` (date): Date when the symptom was observed.\n - `severity` (str): Severity of the symptom, chosen from predefined choices.\n - `location` (str): Location of the symptom, chosen from predefined choices.\n\n Methods:\n - `clean`: Validates the attributes of the symptom.\n \"\"\"\n\n name = models.CharField(max_length=50)\n symptom_type = models.CharField(max_length=20, choices=SymptomTypeChoices.choices)\n description = models.TextField(null=True)\n severity = models.CharField(max_length=20, choices=SymptomSeverityChoices.choices)\n location = models.CharField(max_length=20, choices=SymptomLocationChoices.choices)\n date_observed = models.DateField()\n\n def clean(self):\n \"\"\"\n Validates the attributes of the symptom.\n \"\"\"\n SymptomValidator.validate_name(self.name)\n SymptomValidator.validate_fields(\n self.date_observed, self.symptom_type, self.severity, self.location\n )\n SymptomValidator.validate_type_and_location_compatibility(\n self.symptom_type, self.location\n )\n\n def __str__(self):\n return f\" {self.name} reported as #{self.severity} - on #{self.date_observed}\"\n\n def save(self, *args, **kwargs):\n \"\"\"\n Overrides the save method to perform additional validation before saving.\n \"\"\"\n self.clean()\n super().save(*args, **kwargs)" }, { "identifier": "Disease", "path": "health/models.py", "snippet": "class Disease(models.Model):\n \"\"\"\n Represents diseases in cows.\n\n Attributes:\n - `name` (str): The name of the disease.\n - `pathogen` (ForeignKey): The pathogen causing the disease.\n - `category` (ForeignKey): The category of the disease.\n - `date_reported` (date): Date when the disease was reported.\n - `occurrence_date` (date): Date when the disease occurred.\n - `notes` (str): Additional notes about the disease (nullable).\n - `cows` (ManyToManyField): Cows affected by the disease.\n - `symptoms` (ManyToManyField): Symptoms associated with the disease.\n\n Methods:\n - `clean`: Validates the attributes of the disease.\n \"\"\"\n\n name = models.CharField(max_length=50)\n pathogen = models.ForeignKey(Pathogen, on_delete=models.PROTECT)\n category = models.ForeignKey(\n DiseaseCategory, on_delete=models.PROTECT, related_name=\"diseases\"\n )\n date_reported = models.DateField(auto_now_add=True)\n occurrence_date = models.DateField()\n notes = models.TextField(null=True)\n cows = models.ManyToManyField(Cow, related_name=\"diseases\")\n symptoms = models.ManyToManyField(Symptoms, related_name=\"diseases\")\n\n def __str__(self):\n return f\"{self.name} ({self.pathogen.name}) occurred on {self.occurrence_date}\"\n\n def clean(self):\n DiseaseValidator.validate_date(self.occurrence_date)\n\n def save(self, *args, **kwargs):\n \"\"\"\n Overrides the save method to perform additional validation before saving.\n \"\"\"\n self.clean()\n super().save(*args, **kwargs)" }, { "identifier": "Recovery", "path": "health/models.py", "snippet": "class Recovery(models.Model):\n \"\"\"\n Represents the recovery status of a cow from a specific disease.\n\n Attributes:\n - `cow` (ForeignKey): The cow recovering from the disease.\n - `disease` (ForeignKey): The disease from which the cow is recovering.\n - `diagnosis_date` (date): Date when the disease was diagnosed.\n - `recovery_date` (date): Date when the cow recovered (nullable).\n \"\"\"\n\n cow = models.ForeignKey(Cow, 
on_delete=models.CASCADE, related_name=\"recoveries\")\n disease = models.ForeignKey(\n Disease, on_delete=models.CASCADE, related_name=\"recoveries\"\n )\n diagnosis_date = models.DateField()\n recovery_date = models.DateField(null=True)\n\n def __str__(self):\n if self.recovery_date:\n return f\"{self.cow.tag_number} recovered from {self.disease.name} on {self.recovery_date}\"\n return f\"{self.cow.tag_number} not yet recovered from {self.disease.name}\"" }, { "identifier": "Treatment", "path": "health/models.py", "snippet": "class Treatment(models.Model):\n \"\"\"\n Represents the treatment details for a cow diagnosed with a specific disease.\n\n Attributes:\n - `disease` (ForeignKey): The disease for which the cow is receiving treatment.\n - `cow` (ForeignKey): The cow undergoing treatment.\n - `date_of_treatment` (date): Date when the treatment was initiated.\n - `treatment_method` (str): Description of the treatment method (max length: 300).\n - `notes` (str, nullable): Additional notes about the treatment.\n - `treatment_status` (str): Status of the treatment (choices: 'Scheduled', 'In Progress', 'Completed').\n - `completion_date` (date, nullable): Date when the treatment was completed.\n\n Methods:\n - `clean`: Validates the attributes of the treatment.\n \"\"\"\n\n disease = models.ForeignKey(Disease, on_delete=models.PROTECT)\n cow = models.ForeignKey(Cow, on_delete=models.CASCADE)\n date_of_treatment = models.DateField(auto_now_add=True)\n treatment_method = models.TextField(max_length=300)\n notes = models.TextField(null=True)\n treatment_status = models.CharField(\n max_length=15,\n choices=TreatmentStatusChoices.choices,\n default=TreatmentStatusChoices.SCHEDULED,\n )\n completion_date = models.DateField(null=True)\n\n def clean(self):\n \"\"\"\n Validates the attributes of the treatment.\n\n Raises:\n - `ValidationError` (code: `invalid_treatment_status`):\n If the treatment status is invalid based on the cow's current recovery status.\n \"\"\"\n TreatmentValidator.validate_treatment_status(\n self.cow, self.treatment_status, self.notes, self.completion_date\n )\n\n def save(self, *args, **kwargs):\n \"\"\"\n Overrides the save method to perform additional validation before saving.\n \"\"\"\n self.clean()\n super().save(*args, **kwargs)\n\n def __str__(self):\n if self.completion_date:\n return f\"{self.cow.tag_number} completed treatment for {self.disease.name} on {self.completion_date}\"\n return f\"{self.cow.tag_number} undergoing treatment for {self.disease.name}\"" } ]
from rest_framework import serializers from core.models import Cow from health.models import ( DiseaseCategory, WeightRecord, CullingRecord, QuarantineRecord, Pathogen, Symptoms, Disease, Recovery, Treatment, )
6,470
class Meta: model = DiseaseCategory fields = ("name",) ``` """ class Meta: model = DiseaseCategory fields = ("name",) class SymptomsSerializer(serializers.ModelSerializer): """ Serializer for the Symptoms model. Fields: - `name`: The name of the symptom. - `symptom_type`: The type of the symptom. - `description`: Description of the symptom (nullable). - `date_observed`: Date when the symptom was observed. - `severity`: Severity of the symptom. - `location`: Location of the symptom. Meta: - `model`: The Symptoms model for which the serializer is defined. - `fields`: The fields to include in the serialized representation. """ class Meta: model = Symptoms fields = ( "name", "symptom_type", "description", "date_observed", "severity", "location", ) class DiseaseSerializer(serializers.ModelSerializer): """ Serializer for the Disease model. Fields: - `name`: The name of the disease. - `pathogen`: The pathogen causing the disease. - `category`: The category of the disease. - `date_reported`: Date when the disease was reported. - `occurrence_date`: Date when the disease occurred. - `notes`: Additional notes about the disease (nullable). - `cows`: Cows affected by the disease. - `symptoms`: Symptoms associated with the disease. Meta: - `model`: The Disease model for which the serializer is defined. - `fields`: The fields to include in the serialized representation. Note: The `cows` and `symptoms` fields are represented by their primary keys in the serialized data. """ class Meta: model = Disease fields = ( "name", "pathogen", "category", "date_reported", "occurrence_date", "notes", "cows", "symptoms", ) class RecoverySerializer(serializers.ModelSerializer): """ Serializer for the Recovery model. Fields: - `cow`: The cow recovering from the disease. - `disease`: The disease from which the cow is recovering. - `diagnosis_date`: Date when the disease was diagnosed. - `recovery_date`: Date when the cow recovered (nullable). Meta: - `model`: The Recovery model for which the serializer is defined. - `fields`: The fields to include in the serialized representation. Note: The `cow` and `disease` fields are represented by their primary keys in the serialized data. """ class Meta: model = Recovery fields = ("cow", "disease", "diagnosis_date", "recovery_date") class TreatmentSerializer(serializers.ModelSerializer): """ Serializer for the Treatment model. Fields: - `disease`: The disease for which the cow is receiving treatment. - `cow`: The cow undergoing treatment. - `date_of_treatment`: Date when the treatment was initiated. - `treatment_method`: Description of the treatment method (max length: 300). - `notes`: Additional notes about the treatment (nullable). - `treatment_status`: Status of the treatment. - `completion_date`: Date when the treatment was completed (nullable). Meta: - `model`: The Treatment model for which the serializer is defined. - `fields`: The fields to include in the serialized representation. Note: The `disease` and `cow` fields are represented by their primary keys in the serialized data. """ class Meta:
class WeightRecordSerializer(serializers.ModelSerializer): """ Serializer for the WeightRecord model. Fields: - `cow`: A primary key related field representing the cow associated with the weight record. - `weight_in_kgs`: A decimal field representing the weight of the cow in kilograms. - `date_taken`: A date field representing the date when the weight record was taken. Meta: - `model`: The WeightRecord model for which the serializer is defined. - `fields`: The fields to include in the serialized representation. Usage: Use this serializer to convert WeightRecord model instances to JSON representations and vice versa. Example: ``` class WeightRecord(models.Model): cow = models.ForeignKey(Cow, on_delete=models.CASCADE) weight_in_kgs = models.DecimalField(max_digits=6, decimal_places=2) date_taken = models.DateField(auto_now_add=True) class WeightRecordSerializer(serializers.ModelSerializer): class Meta: model = WeightRecord fields = ("cow", "weight_in_kgs", "date_taken") ``` """ cow = serializers.PrimaryKeyRelatedField(queryset=Cow.objects.all()) class Meta: model = WeightRecord fields = ("cow", "weight_in_kgs", "date_taken") class CullingRecordSerializer(serializers.ModelSerializer): """ Serializer for the CullingRecord model. Fields: - `cow`: A primary key related field representing the cow associated with the culling record. - `reason`: A field representing the reason for culling, chosen from predefined choices. - `notes`: A text field representing additional notes or comments about the culling. - `date_carried`: A date field representing the date when the culling record was created. Meta: - `model`: The CullingRecord model for which the serializer is defined. - `fields`: The fields to include in the serialized representation. Usage: Use this serializer to convert CullingRecord model instances to JSON representations and vice versa. Example: ```python class CullingRecord(models.Model): cow = models.OneToOneField(Cow, on_delete=models.CASCADE, related_name="culling_record") reason = models.CharField(max_length=35, choices=CullingReasonChoices.choices) notes = models.TextField(null=True, max_length=100) date_carried = models.DateField(auto_now_add=True) class CullingRecordSerializer(serializers.ModelSerializer): class Meta: model = CullingRecord fields = ("cow", "reason", "notes", "date_carried") ``` """ cow = serializers.PrimaryKeyRelatedField(queryset=Cow.objects.all()) class Meta: model = CullingRecord fields = ("cow", "reason", "notes", "date_carried") class QuarantineRecordSerializer(serializers.ModelSerializer): """ Serializer for the QuarantineRecord model. Fields: - `cow`: A primary key related field representing the cow associated with the quarantine record. - `reason`: A choice field representing the reason for quarantine. - `start_date`: A date field representing the start date of the quarantine record. - `end_date`: A date field representing the end date of the quarantine record. - `notes`: A text field representing optional notes for the quarantine record. Meta: - `model`: The QuarantineRecord model for which the serializer is defined. - `fields`: The fields to include in the serialized representation. Usage: Use this serializer to convert QuarantineRecord model instances to JSON representations and vice versa. 
Example: ``` class QuarantineRecord(models.Model): cow = models.ForeignKey(Cow, on_delete=models.CASCADE, related_name="quarantine_records") reason = models.CharField(max_length=35, choices=QuarantineReasonChoices.choices) start_date = models.DateField(auto_now_add=True) end_date = models.DateField(null=True) notes = models.TextField(null=True, max_length=100) class QuarantineRecordSerializer(serializers.ModelSerializer): class Meta: model = QuarantineRecord fields = ("cow", "reason", "start_date", "end_date", "notes") ``` """ cow = serializers.PrimaryKeyRelatedField(queryset=Cow.objects.all()) class Meta: model = QuarantineRecord fields = ("cow", "reason", "start_date", "end_date", "notes") class PathogenSerializer(serializers.ModelSerializer): """ Serializer for the Pathogen model. Fields: - `name`: A choice field representing the type of pathogen. Meta: - `model`: The Pathogen model for which the serializer is defined. - `fields`: The fields to include in the serialized representation. Usage: Use this serializer to convert Pathogen model instances to JSON representations and vice versa. Example: ``` class Pathogen(models.Model): name = models.CharField(max_length=10, choices=PathogenChoices.choices) # diagnosis_date = models.DateField(auto_now_add=True) class PathogenSerializer(serializers.ModelSerializer): class Meta: model = Pathogen fields = ("name",) ``` """ class Meta: model = Pathogen fields = ("name",) class DiseaseCategorySerializer(serializers.ModelSerializer): """ Serializer for the DiseaseCategory model. Fields: - `name`: A choice field representing the type of disease. Meta: - `model`: The DiseaseCategory model for which the serializer is defined. - `fields`: The fields to include in the serialized representation. Usage: Use this serializer to convert DiseaseCategory model instances to JSON representations and vice versa. Example: ``` class DiseaseCategory(models.Model): name = models.CharField(max_length=15, choices=DiseaseCategoryChoices.choices) class DiseaseCategorySerializer(serializers.ModelSerializer): class Meta: model = DiseaseCategory fields = ("name",) ``` """ class Meta: model = DiseaseCategory fields = ("name",) class SymptomsSerializer(serializers.ModelSerializer): """ Serializer for the Symptoms model. Fields: - `name`: The name of the symptom. - `symptom_type`: The type of the symptom. - `description`: Description of the symptom (nullable). - `date_observed`: Date when the symptom was observed. - `severity`: Severity of the symptom. - `location`: Location of the symptom. Meta: - `model`: The Symptoms model for which the serializer is defined. - `fields`: The fields to include in the serialized representation. """ class Meta: model = Symptoms fields = ( "name", "symptom_type", "description", "date_observed", "severity", "location", ) class DiseaseSerializer(serializers.ModelSerializer): """ Serializer for the Disease model. Fields: - `name`: The name of the disease. - `pathogen`: The pathogen causing the disease. - `category`: The category of the disease. - `date_reported`: Date when the disease was reported. - `occurrence_date`: Date when the disease occurred. - `notes`: Additional notes about the disease (nullable). - `cows`: Cows affected by the disease. - `symptoms`: Symptoms associated with the disease. Meta: - `model`: The Disease model for which the serializer is defined. - `fields`: The fields to include in the serialized representation. Note: The `cows` and `symptoms` fields are represented by their primary keys in the serialized data. 
""" class Meta: model = Disease fields = ( "name", "pathogen", "category", "date_reported", "occurrence_date", "notes", "cows", "symptoms", ) class RecoverySerializer(serializers.ModelSerializer): """ Serializer for the Recovery model. Fields: - `cow`: The cow recovering from the disease. - `disease`: The disease from which the cow is recovering. - `diagnosis_date`: Date when the disease was diagnosed. - `recovery_date`: Date when the cow recovered (nullable). Meta: - `model`: The Recovery model for which the serializer is defined. - `fields`: The fields to include in the serialized representation. Note: The `cow` and `disease` fields are represented by their primary keys in the serialized data. """ class Meta: model = Recovery fields = ("cow", "disease", "diagnosis_date", "recovery_date") class TreatmentSerializer(serializers.ModelSerializer): """ Serializer for the Treatment model. Fields: - `disease`: The disease for which the cow is receiving treatment. - `cow`: The cow undergoing treatment. - `date_of_treatment`: Date when the treatment was initiated. - `treatment_method`: Description of the treatment method (max length: 300). - `notes`: Additional notes about the treatment (nullable). - `treatment_status`: Status of the treatment. - `completion_date`: Date when the treatment was completed (nullable). Meta: - `model`: The Treatment model for which the serializer is defined. - `fields`: The fields to include in the serialized representation. Note: The `disease` and `cow` fields are represented by their primary keys in the serialized data. """ class Meta:
model = Treatment
9
2023-12-09 06:56:42+00:00
8k
facebookresearch/chat2map-official
main.py
[ { "identifier": "baseline_registry", "path": "chat2map/common/baseline_registry.py", "snippet": "class BaselineRegistry(Registry):\n def register_trainer(cls, to_register=None, *, name: Optional[str] = None):\n def get_trainer(cls, name):\n def register_env(cls, to_register=None, *, name: Optional[str] = None):\n def get_env(cls, name):" }, { "identifier": "get_config", "path": "chat2map/config/default.py", "snippet": "def get_config(\n\t\tconfig_paths: Optional[Union[List[str], str]] = None,\n\t\topts: Optional[list] = None,\n\t\tmodel_dir: Optional[str] = None,\n\t\trun_type: Optional[str] = None\n) -> CN:\n\t\"\"\"\n\tCreate a unified config with default values overwritten by values from\n\t`config_paths` and overwritten by options from `opts`.\n\t:param config_paths: List of config paths or string that contains comma separated list of config paths.\n\t:param opts: Config options (keys, values) in a list (e.g., passed from command line into the config. For example,\n\t\t\t\t`opts = ['FOO.BAR',0.5]`. Argument can be used for parameter sweeping or quick tests.\n\t:param model_dir: suffix for output dirs\n\t:param run_type: either train or eval\n\t:return:\n\t\"\"\"\n\n\tconfig = merge_from_path(_C.clone(), config_paths)\n\tconfig.TASK_CONFIG = get_task_config(config_paths=config.BASE_TASK_CONFIG_PATH)\n\n\tif opts:\n\t\tconfig.CMD_TRAILING_OPTS = opts\n\t\tconfig.merge_from_list(opts)\n\n\tassert model_dir is not None, \"set --model-dir\"\n\tconfig.MODEL_DIR = model_dir\n\tconfig.TENSORBOARD_DIR = os.path.join(config.MODEL_DIR, config.TENSORBOARD_DIR)\n\tconfig.CHECKPOINT_FOLDER = os.path.join(config.MODEL_DIR, 'data')\n\tconfig.VIDEO_DIR = os.path.join(config.MODEL_DIR, 'video_dir')\n\tconfig.AUDIO_DIR = os.path.join(config.MODEL_DIR, 'audio_dir')\n\tconfig.LOG_FILE = os.path.join(config.MODEL_DIR, config.LOG_FILE)\n\tif config.EVAL_CKPT_PATH == \"data/checkpoints\":\n\t\tconfig.EVAL_CKPT_PATH = os.path.join(config.MODEL_DIR, 'data')\n\n\tdirs = [config.VIDEO_DIR, config.AUDIO_DIR, config.TENSORBOARD_DIR, config.CHECKPOINT_FOLDER]\n\tif (run_type == 'train') and (not config.RESUME_AFTER_PREEMPTION):\n\t\t# check dirs\n\t\tif any([os.path.exists(d) for d in dirs]):\n\t\t\tfor d in dirs:\n\t\t\t\tif os.path.exists(d):\n\t\t\t\t\tprint('{} exists'.format(d))\n\t\t\tkey = input('Output directory already exists! Overwrite the folder? (y/n)')\n\t\t\tif key == 'y':\n\t\t\t\tfor d in dirs:\n\t\t\t\t\tif os.path.exists(d):\n\t\t\t\t\t\tshutil.rmtree(d)\n\n\tconfig.TASK_CONFIG.defrost()\n\n\t# ------------------ modifying SIMULATOR cfg --------------------\n\t# setting SIMULATOR'S USE_SYNC_VECENV flag\n\tconfig.TASK_CONFIG.SIMULATOR.USE_SYNC_VECENV = config.USE_SYNC_VECENV\n\n\t# setting max. 
number of steps of simulator\n\tconfig.TASK_CONFIG.ENVIRONMENT.MAX_EPISODE_STEPS = config.TASK_CONFIG.ENVIRONMENT.MAX_CONTEXT_LENGTH - 1\n\tconfig.TASK_CONFIG.SIMULATOR.MAX_EPISODE_STEPS = config.TASK_CONFIG.ENVIRONMENT.MAX_EPISODE_STEPS\n\tconfig.TASK_CONFIG.SIMULATOR.MAX_CONTEXT_LENGTH = config.TASK_CONFIG.ENVIRONMENT.MAX_CONTEXT_LENGTH\n\tconfig.TASK_CONFIG.SIMULATOR.VISUAL_BUDGET = config.TASK_CONFIG.ENVIRONMENT.VISUAL_BUDGET\n\n\t# setting simulator attrs from task attrs\n\tconfig.TASK_CONFIG.SIMULATOR.EGO_LOCAL_OCC_MAP.SIZE = config.TASK_CONFIG.TASK.CONTEXT_EGO_LOCAL_MAP_SENSOR.SIZE\n\tconfig.TASK_CONFIG.SIMULATOR.EGO_LOCAL_OCC_MAP.NUM_CHANNELS = config.TASK_CONFIG.TASK.CONTEXT_EGO_LOCAL_MAP_SENSOR.NUM_CHANNELS\n\n\tconfig.TASK_CONFIG.SIMULATOR.EGO_STITCHED_GLOBAL_CANONICAL_OCC_MAP.SIZE =\\\n\t\tconfig.TASK_CONFIG.TASK.CONTEXT_STITCHED_EGO_LOCAL_MAP_SENSOR.SIZE\n\n\tconfig.TASK_CONFIG.SIMULATOR.GT_GLOBAL_CANONICAL_OCC_MAP_EGO_CROP.SIZE =\\\n\t\tconfig.TASK_CONFIG.TASK.QUERY_GT_GLOB_CAN_MAP_EGO_CROP_SENSOR.SIZE\n\tconfig.TASK_CONFIG.SIMULATOR.GT_GLOBAL_CANONICAL_OCC_MAP_EGO_CROP.NUM_CHANNELS =\\\n\t\tconfig.TASK_CONFIG.TASK.QUERY_GT_GLOB_CAN_MAP_EGO_CROP_SENSOR.NUM_CHANNELS\n\tconfig.TASK_CONFIG.SIMULATOR.GT_GLOBAL_CANONICAL_OCC_MAP_EGO_CROP.SCALE =\\\n\t\tconfig.TASK_CONFIG.TASK.QUERY_GT_GLOB_CAN_MAP_EGO_CROP_SENSOR.SCALE\n\n\tconfig.TASK_CONFIG.SIMULATOR.RGB_SENSOR.WIDTH = config.TASK_CONFIG.TASK.CONTEXT_RGB_SENSOR.WIDTH\n\tconfig.TASK_CONFIG.SIMULATOR.RGB_SENSOR.HEIGHT = config.TASK_CONFIG.TASK.CONTEXT_RGB_SENSOR.HEIGHT\n\n\tconfig.TASK_CONFIG.SIMULATOR.DEPTH_SENSOR.WIDTH = config.TASK_CONFIG.TASK.CONTEXT_DEPTH_SENSOR.WIDTH\n\tconfig.TASK_CONFIG.SIMULATOR.DEPTH_SENSOR.HEIGHT = config.TASK_CONFIG.TASK.CONTEXT_DEPTH_SENSOR.HEIGHT\n\tconfig.TASK_CONFIG.SIMULATOR.DEPTH_SENSOR.MIN_DEPTH = config.TASK_CONFIG.TASK.CONTEXT_DEPTH_SENSOR.MIN_DEPTH\n\tconfig.TASK_CONFIG.SIMULATOR.DEPTH_SENSOR.MAX_DEPTH = config.TASK_CONFIG.TASK.CONTEXT_DEPTH_SENSOR.MAX_DEPTH\n\tconfig.TASK_CONFIG.SIMULATOR.DEPTH_SENSOR.NORMALIZE_DEPTH = config.TASK_CONFIG.TASK.CONTEXT_DEPTH_SENSOR.NORMALIZE_DEPTH\n\tconfig.TASK_CONFIG.SIMULATOR.DEPTH_SENSOR.ADD_REDWOOD_NOISE = config.TASK_CONFIG.TASK.CONTEXT_DEPTH_SENSOR.ADD_REDWOOD_NOISE\n\tconfig.TASK_CONFIG.SIMULATOR.DEPTH_SENSOR.REDWOOD_NOISE_RAND_NUMS_PATH =\\\n\t\tconfig.TASK_CONFIG.TASK.CONTEXT_DEPTH_SENSOR.REDWOOD_NOISE_RAND_NUMS_PATH\n\tconfig.TASK_CONFIG.SIMULATOR.DEPTH_SENSOR.REDWOOD_NOISE_MULTIPLIER =\\\n\t\tconfig.TASK_CONFIG.TASK.CONTEXT_DEPTH_SENSOR.REDWOOD_NOISE_MULTIPLIER\n\tconfig.TASK_CONFIG.SIMULATOR.DEPTH_SENSOR.REDWOOD_DEPTH_NOISE_DIST_MODEL =\\\n\t\tconfig.TASK_CONFIG.TASK.CONTEXT_DEPTH_SENSOR.REDWOOD_DEPTH_NOISE_DIST_MODEL\n\n\tconfig.TASK_CONFIG.SIMULATOR.VIEW_POSE_SENSOR.ADD_TRUNCATED_GAUSSIAN_NOISE =\\\n\t\tconfig.TASK_CONFIG.TASK.CONTEXT_VIEW_POSE_SENSOR.ADD_TRUNCATED_GAUSSIAN_NOISE\n\tconfig.TASK_CONFIG.SIMULATOR.VIEW_POSE_SENSOR.TRUNCATED_GAUSSIAN_NOISE =\\\n\t\tconfig.TASK_CONFIG.TASK.CONTEXT_VIEW_POSE_SENSOR.TRUNCATED_GAUSSIAN_NOISE\n\n\t# ------------------------ modifying TASK cfg ----------------------\n\tconfig.TASK_CONFIG.TASK.CONTEXT_STITCHED_EGO_LOCAL_MAP_SENSOR.NUM_CHANNELS =\\\n\t\tconfig.TASK_CONFIG.TASK.CONTEXT_EGO_LOCAL_MAP_SENSOR.NUM_CHANNELS\n\n\tconfig.TASK_CONFIG.TASK.QUERY_STITCHED_GT_GLOB_CAN_MAP_EGO_CROP_SENSOR.SIZE =\\\n\t\tconfig.TASK_CONFIG.TASK.CONTEXT_STITCHED_EGO_LOCAL_MAP_SENSOR.SIZE\n\tconfig.TASK_CONFIG.TASK.QUERY_STITCHED_GT_GLOB_CAN_MAP_EGO_CROP_SENSOR.NUM_CHANNELS 
=\\\n\t\tconfig.TASK_CONFIG.TASK.QUERY_GT_GLOB_CAN_MAP_EGO_CROP_SENSOR.NUM_CHANNELS\n\n\tconfig.TASK_CONFIG.TASK.CONTEXT_RGB_SENSOR.FEATURE_SHAPE = [config.TASK_CONFIG.SIMULATOR.ALL_AGENTS.NUM,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tconfig.TASK_CONFIG.SIMULATOR.MAX_CONTEXT_LENGTH,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tconfig.TASK_CONFIG.TASK.CONTEXT_RGB_SENSOR.HEIGHT,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tconfig.TASK_CONFIG.TASK.CONTEXT_RGB_SENSOR.WIDTH,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t3]\n\n\tconfig.TASK_CONFIG.TASK.CONTEXT_EGO_LOCAL_MAP_SENSOR.FEATURE_SHAPE = [config.TASK_CONFIG.SIMULATOR.ALL_AGENTS.NUM,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t config.TASK_CONFIG.SIMULATOR.MAX_CONTEXT_LENGTH,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t config.TASK_CONFIG.TASK.CONTEXT_EGO_LOCAL_MAP_SENSOR.SIZE,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t config.TASK_CONFIG.TASK.CONTEXT_EGO_LOCAL_MAP_SENSOR.SIZE,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t config.TASK_CONFIG.TASK.CONTEXT_EGO_LOCAL_MAP_SENSOR.NUM_CHANNELS]\n\n\tconfig.TASK_CONFIG.TASK.CONTEXT_STITCHED_EGO_LOCAL_MAP_SENSOR.FEATURE_SHAPE =\\\n\t\t[config.TASK_CONFIG.SIMULATOR.MAX_CONTEXT_LENGTH - 1,\n\t\t config.TASK_CONFIG.TASK.CONTEXT_STITCHED_EGO_LOCAL_MAP_SENSOR.SIZE,\n\t\t config.TASK_CONFIG.TASK.CONTEXT_STITCHED_EGO_LOCAL_MAP_SENSOR.SIZE,\n\t\t config.TASK_CONFIG.TASK.CONTEXT_STITCHED_EGO_LOCAL_MAP_SENSOR.NUM_CHANNELS]\n\n\tconfig.TASK_CONFIG.TASK.CONTEXT_VIEW_POSE_SENSOR.FEATURE_SHAPE = [config.TASK_CONFIG.SIMULATOR.ALL_AGENTS.NUM,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t config.TASK_CONFIG.SIMULATOR.MAX_CONTEXT_LENGTH,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 5]\n\n\tconfig.TASK_CONFIG.TASK.CONTEXT_VIEW_R_N_AZ_SENSOR.FEATURE_SHAPE = [config.TASK_CONFIG.SIMULATOR.ALL_AGENTS.NUM,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tconfig.TASK_CONFIG.SIMULATOR.MAX_CONTEXT_LENGTH,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tconfig.TASK_CONFIG.TASK.CONTEXT_VIEW_R_N_AZ_SENSOR.FEATURE_SHAPE[-1]]\n\n\tconfig.TASK_CONFIG.TASK.CONTEXT_OTHER_AUDIO_POSE_SENSOR.FEATURE_SHAPE = config.TASK_CONFIG.TASK.CONTEXT_VIEW_POSE_SENSOR.FEATURE_SHAPE\n\n\tconfig.TASK_CONFIG.TASK.AMBI_WAV_SENSOR.FEATURE_SHAPE[0] =\\\n\t\tconfig.TASK_CONFIG.SIMULATOR.AUDIO.MAX_VALID_IMPULSE_LENGTH_AFTER_REMOVING_LEADING_ZEROS\n\n\tconfig.TASK_CONFIG.TASK.CONTEXT_SELF_AUDIO_SENSOR.FEATURE_SHAPE[0] = config.TASK_CONFIG.SIMULATOR.ALL_AGENTS.NUM\n\tconfig.TASK_CONFIG.TASK.CONTEXT_SELF_AUDIO_SENSOR.FEATURE_SHAPE[1] = config.TASK_CONFIG.ENVIRONMENT.MAX_CONTEXT_LENGTH\n\tconfig.TASK_CONFIG.TASK.CONTEXT_SELF_AUDIO_SENSOR.FEATURE_SHAPE[2] = config.TASK_CONFIG.TASK.AMBI_WAV_SENSOR.FEATURE_SHAPE[0]\n\n\tconfig.TASK_CONFIG.TASK.CONTEXT_OTHER_AUDIO_SENSOR.FEATURE_SHAPE[0] = config.TASK_CONFIG.SIMULATOR.ALL_AGENTS.NUM\n\tconfig.TASK_CONFIG.TASK.CONTEXT_OTHER_AUDIO_SENSOR.FEATURE_SHAPE[1] =\\\n\t\tconfig.TASK_CONFIG.ENVIRONMENT.MAX_CONTEXT_LENGTH * (config.TASK_CONFIG.SIMULATOR.ALL_AGENTS.NUM - 1)\n\tconfig.TASK_CONFIG.TASK.CONTEXT_OTHER_AUDIO_SENSOR.FEATURE_SHAPE[2] = config.TASK_CONFIG.TASK.AMBI_WAV_SENSOR.FEATURE_SHAPE[0]\n\n\tconfig.TASK_CONFIG.TASK.QUERY_GT_GLOB_CAN_MAP_EGO_CROP_SENSOR.FEATURE_SHAPE =\\\n\t\t[config.TASK_CONFIG.SIMULATOR.ALL_AGENTS.NUM,\n\t\t config.TASK_CONFIG.SIMULATOR.MAX_CONTEXT_LENGTH,\n\t\t config.TASK_CONFIG.TASK.QUERY_GT_GLOB_CAN_MAP_EGO_CROP_SENSOR.SIZE,\n\t\t config.TASK_CONFIG.TASK.QUERY_GT_GLOB_CAN_MAP_EGO_CROP_SENSOR.SIZE,\n\t\t config.TASK_CONFIG.TASK.QUERY_GT_GLOB_CAN_MAP_EGO_CROP_SENSOR.NUM_CHANNELS,\n\t\t ]\n\n\tconfig.TASK_CONFIG.TASK.QUERY_GT_GLOB_CAN_MAP_EGO_CROP_EXPLORED_PART_MASK_SENSOR.FEATURE_SHAPE 
=\\\n\t\tconfig.TASK_CONFIG.TASK.QUERY_GT_GLOB_CAN_MAP_EGO_CROP_SENSOR.FEATURE_SHAPE\n\tconfig.TASK_CONFIG.TASK.QUERY_GT_GLOB_CAN_MAP_EGO_CROP_EXPLORED_PART_MASK_SENSOR.SIZE =\\\n\t\tconfig.TASK_CONFIG.TASK.QUERY_GT_GLOB_CAN_MAP_EGO_CROP_SENSOR.SIZE\n\tconfig.TASK_CONFIG.TASK.QUERY_GT_GLOB_CAN_MAP_EGO_CROP_EXPLORED_PART_MASK_SENSOR.NUM_CHANNELS =\\\n\t\tconfig.TASK_CONFIG.TASK.QUERY_GT_GLOB_CAN_MAP_EGO_CROP_SENSOR.NUM_CHANNELS\n\tconfig.TASK_CONFIG.TASK.QUERY_GT_GLOB_CAN_MAP_EGO_CROP_EXPLORED_PART_MASK_SENSOR.SCALE =\\\n\t\tconfig.TASK_CONFIG.TASK.QUERY_GT_GLOB_CAN_MAP_EGO_CROP_SENSOR.SCALE\n\n\tconfig.TASK_CONFIG.TASK.QUERY_STITCHED_GT_GLOB_CAN_MAP_EGO_CROP_SENSOR.FEATURE_SHAPE =\\\n\t\tconfig.TASK_CONFIG.TASK.CONTEXT_STITCHED_EGO_LOCAL_MAP_SENSOR.FEATURE_SHAPE\n\tconfig.TASK_CONFIG.TASK.QUERY_STITCHED_GT_GLOB_CAN_MAP_EGO_CROP_SENSOR.FEATURE_SHAPE[-1] =\\\n\t\tconfig.TASK_CONFIG.TASK.QUERY_STITCHED_GT_GLOB_CAN_MAP_EGO_CROP_SENSOR.NUM_CHANNELS\n\n\tconfig.TASK_CONFIG.TASK.CONTEXT_AUDIO_MASK_SENSOR.FEATURE_SHAPE = [config.TASK_CONFIG.SIMULATOR.ALL_AGENTS.NUM, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t config.TASK_CONFIG.SIMULATOR.MAX_CONTEXT_LENGTH,]\n\tconfig.TASK_CONFIG.TASK.ALL_CONTEXT_AUDIO_MASK_SENSOR.FEATURE_SHAPE = config.TASK_CONFIG.TASK.CONTEXT_AUDIO_MASK_SENSOR.FEATURE_SHAPE\n\tconfig.TASK_CONFIG.TASK.PREV_CONTEXT_VIEW_MASK_SENSOR.FEATURE_SHAPE = config.TASK_CONFIG.TASK.CONTEXT_AUDIO_MASK_SENSOR.FEATURE_SHAPE\n\n\tconfig.TASK_CONFIG.TASK.QUERY_MASK_SENSOR.FEATURE_SHAPE = [config.TASK_CONFIG.SIMULATOR.ALL_AGENTS.NUM,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t config.TASK_CONFIG.SIMULATOR.MAX_CONTEXT_LENGTH]\n\tconfig.TASK_CONFIG.TASK.ALL_QUERY_MASK_SENSOR.FEATURE_SHAPE = config.TASK_CONFIG.TASK.QUERY_MASK_SENSOR.FEATURE_SHAPE\n\n\t# -------------------------------- updating sim misc. 
config for transfer from passive to active mapping -------------------------------------\n\tconfig.TASK_CONFIG.SIMULATOR.STITCH_TOP_DOWN_MAPS = config.STITCH_TOP_DOWN_MAPS_ACTIVE_MAPPING\n\tconfig.TASK_CONFIG.SIMULATOR.SIM_ENV = config.TASK_CONFIG.ENVIRONMENT\n\tconfig.TASK_CONFIG.SIMULATOR.SIM_TASK = config.TASK_CONFIG.TASK\n\tconfig.TASK_CONFIG.SIMULATOR.SIM_TRAINER = config.PassiveMapping\n\n\tif config.STITCH_TOP_DOWN_MAPS_ACTIVE_MAPPING:\n\t\tconfig.TASK_CONFIG.TASK.SENSORS += [\"EPISODE_SCENE_IDX_SENSOR\",\n\t\t\t\t\t\t\t\t\t\t\t\"EPISODE_REF_RECEIVER_AZIMUTH_SENSOR\",\n\t\t\t\t\t\t\t\t\t\t\t\"QUERY_STITCHED_GT_GLOB_CAN_MAP_EGO_CROP_SENSOR\",\n\t\t\t\t\t\t\t\t\t\t\t\"CONTEXT_VIEW_R_N_AZ_SENSOR\"]\n\n\tif \"CONTEXT_VIEW_R_N_AZ_SENSOR\" not in config.TASK_CONFIG.TASK.SENSORS:\n\t\tconfig.TASK_CONFIG.TASK.SENSORS.append(\"CONTEXT_VIEW_R_N_AZ_SENSOR\")\n\n\tif (run_type not in [\"train\"]) and (config.EVAL.SPLIT is not None) and (config.EVAL.SPLIT[:3] == \"val\"):\n\t\tconfig.PassiveMapping.TrainLosses.types = []\n\t\tconfig.PassiveMapping.EvalMetrics.types = []\n\n\tconfig.TASK_CONFIG.DATASET.EVAL_SPLIT = config.EVAL.SPLIT\n\tconfig.TASK_CONFIG.DATASET.EVAL_EPISODE_COUNT = config.EVAL.EPISODE_COUNT\n\n\tif run_type == \"eval\":\n\t\tconfig.TASK_CONFIG.ENVIRONMENT.VISUAL_BUDGET_COMPARE_AGAINST_LAST_TOP_DOWN_MAPS = config.TASK_CONFIG.ENVIRONMENT.VISUAL_BUDGET\n\t\tconfig.TASK_CONFIG.ENVIRONMENT.VISUAL_BUDGET =\\\n\t\t\tconfig.TASK_CONFIG.ENVIRONMENT.MAX_CONTEXT_LENGTH * config.TASK_CONFIG.SIMULATOR.ALL_AGENTS.NUM\n\t\tconfig.TASK_CONFIG.SIMULATOR.VISUAL_BUDGET = config.TASK_CONFIG.ENVIRONMENT.VISUAL_BUDGET\n\n\tconfig.TASK_CONFIG.freeze()\n\n\tconfig.freeze()\n\n\t# ---------------------------- assertions for metrics --------------------------------\n\tif (config.TRAINER_NAME == \"chat2map\") and (run_type == \"train\"):\n\t\tassert config.PassiveMapping.EvalMetrics.type_for_ckpt_dump in config.PassiveMapping.EvalMetrics.types\n\n\treturn config" } ]
import argparse import logging import warnings import tensorflow as tf import torch from chat2map.common.baseline_registry import baseline_registry from chat2map.config.default import get_config from habitat_audio import *
3,998
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. warnings.filterwarnings('ignore', category=FutureWarning) warnings.filterwarnings('ignore', category=UserWarning) def main(): # command line args parser = argparse.ArgumentParser() parser.add_argument( "--run-type", choices=["train", "eval"], default='train', help="run type of the experiment (train or eval)", ) parser.add_argument( "--exp-config", type=str, default='baselines/config/pointnav_rgb.yaml', help="path to config yaml containing info about experiment", ) parser.add_argument( "opts", default=None, nargs=argparse.REMAINDER, help="Modify config options from command line", ) parser.add_argument( "--model-dir", default=None, help="Modify config options from command line", ) parser.add_argument( "--eval-interval", type=int, default=1, help="Evaluation interval of checkpoints", ) parser.add_argument( "--prev-ckpt-ind", type=int, default=-1, help="Evaluation interval of checkpoints", ) args = parser.parse_args() # run exp config = get_config(args.exp_config, args.opts, args.model_dir, args.run_type)
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. warnings.filterwarnings('ignore', category=FutureWarning) warnings.filterwarnings('ignore', category=UserWarning) def main(): # command line args parser = argparse.ArgumentParser() parser.add_argument( "--run-type", choices=["train", "eval"], default='train', help="run type of the experiment (train or eval)", ) parser.add_argument( "--exp-config", type=str, default='baselines/config/pointnav_rgb.yaml', help="path to config yaml containing info about experiment", ) parser.add_argument( "opts", default=None, nargs=argparse.REMAINDER, help="Modify config options from command line", ) parser.add_argument( "--model-dir", default=None, help="Modify config options from command line", ) parser.add_argument( "--eval-interval", type=int, default=1, help="Evaluation interval of checkpoints", ) parser.add_argument( "--prev-ckpt-ind", type=int, default=-1, help="Evaluation interval of checkpoints", ) args = parser.parse_args() # run exp config = get_config(args.exp_config, args.opts, args.model_dir, args.run_type)
trainer_init = baseline_registry.get_trainer(config.TRAINER_NAME)
0
2023-12-06 01:20:37+00:00
8k
wrongbad/badcad
badcad/badcad.py
[ { "identifier": "display", "path": "badcad/utils.py", "snippet": "def display(thing, \n vscode_fix=True, \n wireframe=False, \n color='#aaaa22', \n smoothing_threshold=-1,\n width=640,\n height=640,\n ):\n if vscode_fix:\n fix_vscode_style()\n \n if isinstance(thing, (tuple, list)):\n verts, tris = thing\n elif hasattr(thing, 'to_mesh'):\n m = thing.to_mesh()\n verts = m.vert_properties[...,:3].astype(np.float32)\n tris = m.tri_verts.astype(np.uint32)\n else:\n raise ValueError(f'unsupported thing: {type(thing)}')\n\n box0 = np.min(verts, axis=0)\n box1 = np.max(verts, axis=0)\n\n sz = np.linalg.norm(box1-box0)\n mid = (box0+box1)/2\n\n verts = verts - mid\n tnormals = triangle_normals(verts, tris)\n vnormals = smooth_normals(tris, tnormals, smoothing_threshold)\n verts = verts[tris]\n index = np.arange(tris.size, dtype=np.uint32)\n\n geometry = pythreejs.BufferGeometry(\n attributes = dict(\n position = pythreejs.BufferAttribute(verts),\n normal = pythreejs.BufferAttribute(vnormals),\n ),\n index = pythreejs.BufferAttribute(index)\n )\n\n material = pythreejs.MeshPhysicalMaterial(\n color = color,\n reflectivity = 0.2,\n clearCoat = 0.6,\n clearCoatRoughness = 0.7,\n wireframe = wireframe,\n );\n\n threemesh = pythreejs.Mesh(geometry, material)\n\n lights = [\n pythreejs.DirectionalLight(\n color='white', \n position=l[:3],\n intensity=l[3],\n )\n for l in [\n (-40, 5, 40, 0.5), \n (0, 0, 40, 0.2), \n (20, 5, -20, 0.1), \n ]\n ]\n\n camera = pythreejs.PerspectiveCamera(\n position=[0, 0, sz*1.3], \n up=[0, 1, 0], \n children=lights,\n )\n\n controls = pythreejs.OrbitControls(\n controlling=camera, \n rotateSpeed=1.0, \n zoomSpeed=0.5,\n enableZoom=False, # avoid notbook scroll conflict\n )\n\n scene = pythreejs.Scene(\n children=[\n threemesh,\n camera, \n pythreejs.AmbientLight(color='#aaf')\n ], \n background=None,\n )\n\n return pythreejs.Renderer(\n camera=camera,\n scene=scene,\n alpha=True,\n clearOpacity=0.2,\n controls=[controls],\n width=width, \n height=height,\n )" }, { "identifier": "triangle_normals", "path": "badcad/utils.py", "snippet": "def triangle_normals(verts, tris):\n a = verts[tris[:,1]] - verts[tris[:,0]]\n b = verts[tris[:,2]] - verts[tris[:,1]]\n tnormals = np.cross(a, b)\n tnormals /= np.linalg.norm(tnormals, axis=-1, keepdims=True)\n return tnormals" }, { "identifier": "polygon_nearest_alignment", "path": "badcad/utils.py", "snippet": "def polygon_nearest_alignment(va, vb):\n dist = lambda x: np.sum(x ** 2, axis=-1)\n j0 = np.argmin(dist(vb - va[0]))\n i, j = 0, j0\n na, nb = len(va), len(vb)\n out = []\n while True:\n ip1, jp1 = (i+1)%na, (j+1)%nb\n d0 = dist(va[ip1] - vb[j])\n d1 = dist(va[i] - vb[jp1])\n if d0 < d1:\n out += [[ip1, j]]\n i = ip1\n else:\n out += [[i, jp1]]\n j = jp1\n if (i,j) == (0, j0):\n break\n return out" }, { "identifier": "svg2polygons", "path": "badcad/utils.py", "snippet": "def svg2polygons(svg, fn=8):\n import svgelements\n # this lib handles transforms and `use` tags\n svg = svgelements.SVG.parse(BytesIO(svg))\n polys = []\n for e in svg.elements():\n if isinstance(e, svgelements.Path):\n # TODO policy for unclosed paths\n p = PolyPath(fn=fn)\n for s in e.segments():\n if isinstance(s, svgelements.Move):\n p.move(s.end)\n elif isinstance(s, svgelements.Line):\n p.line(s.end)\n elif isinstance(s, svgelements.QuadraticBezier):\n p.bez([s.control1, s.end])\n elif isinstance(s, svgelements.CubicBezier):\n p.bez([s.control1, s.control2, s.end])\n elif isinstance(s, svgelements.Close):\n p.close()\n else:\n raise 
ValueError(f'unsupported segment: {type(s)}')\n polys += p.polys\n return polys" }, { "identifier": "text2svg", "path": "badcad/utils.py", "snippet": "def text2svg(text, size=10, font=\"Helvetica\"):\n import cairo\n memfile = BytesIO()\n with cairo.SVGSurface(memfile, size, size) as surface:\n ctx = cairo.Context(surface)\n ctx.set_font_size(size)\n ctx.select_font_face(font,\n cairo.FONT_SLANT_NORMAL,\n cairo.FONT_WEIGHT_NORMAL)\n ctx.show_text(text)\n return memfile.getvalue()" }, { "identifier": "PolyPath", "path": "badcad/utils.py", "snippet": "class PolyPath:\n def __init__(self, fn=32):\n self.polys = []\n self.poly = []\n self.pos = (0,0)\n self.fn = fn\n\n def move(self, p):\n self.pos = p\n return self\n \n def line(self, p):\n if len(self.poly) == 0:\n self.poly += [self.pos]\n self.poly += [p]\n self.pos = p\n return self\n\n def bez(self, pts, fn=0):\n if len(self.poly) == 0:\n self.poly += [self.pos]\n fn = fn or self.fn\n vs = [p[0]+p[1]*1j for p in [self.pos, *pts]]\n for i in range(1, fn):\n n = len(vs) - 1\n t = i / fn\n u = 1 - t\n c = u ** n\n v = 0\n for j in range(len(vs)):\n v += c * vs[j]\n c *= t * (n-j) / (u * (1+j))\n self.poly += [(v.real, v.imag)]\n self.poly += [pts[-1]]\n self.pos = pts[-1]\n return self\n\n def close(self):\n self.polys += [self.poly]\n self.poly = []" } ]
import manifold3d import numpy as np from manifold3d import Manifold, CrossSection from .utils import ( display, triangle_normals, polygon_nearest_alignment, svg2polygons, text2svg, PolyPath )
4,491
def num_vert(self): return self.cross_section.num_vert() def offset(self, delta, join_type='miter', miter_limit=2, circular_segments=0): if join_type == 'round': join_type = manifold3d.JoinType.Round elif join_type == 'miter': join_type = manifold3d.JoinType.Miter elif join_type == 'square': join_type = manifold3d.JoinType.Square else: raise ValueError(f'{join_type=}') return Shape(self.cross_section.offset( delta, join_type, miter_limit, circular_segments )) def revolve(self, z=360, fn=0): return Solid(self.cross_section.revolve( circular_segments=fn, revolve_degrees=z, )) def rotate(self, z): return Shape(self.cross_section.rotate(z)) def scale(self, x=1, y=1): return Shape(self.cross_section.scale((x, y))) def simplify(self, eps): return Shape(self.cross_section.simplify(eps)) def to_polygons(self): return self.cross_section.to_polygons() def transform(self, matrix): return Shape(self.cross_section.transform(matrix)) def move(self, x=0, y=0): return Shape(self.cross_section.translate((x,y))) def warp(self, xy_map_func): return Shape(self.cross_section.warp(xy_map_func)) def warp_batch(self, xy_map_func): return Shape(self.cross_section.warp_batch(xy_map_func)) def get_circular_segments(radius): return manifold3d.get_circular_segments(radius) def set_circular_segments(nseg): manifold3d.set_circular_segments(nseg) def set_min_circular_angle(degrees): manifold3d.set_min_circular_angle(degrees) def set_min_circular_edge_length(length): manifold3d.set_min_circular_edge_length(length) def hull(*solids): mans = [s.manifold for s in solids] return Solid(Manifold.batch_hull(mans)) def hull_points(points): return Shape(Manifold.hull_points(points)) def hull2d(*shapes): sects = [s.cross_section for s in shapes] return Shape(CrossSection.batch_hull(sects)) def hull2d_points(points): return Shape(CrossSection.hull_points(points)) def cube(x=1, y=1, z=1, center=False): return Solid(Manifold.cube((x, y, z), center=center)) def cylinder(h=1, d=1, r=None, center=False, fn=0, outer=False): r = r or d/2 fn = fn or get_circular_segments(r) s = 1/np.cos(np.pi/fn) if outer else 1 return Solid(Manifold.cylinder( h, r*s, r*s, circular_segments=fn, center=center)) def conic(h=1, d1=1, d2=1, r1=None, r2=None, center=False, fn=0, outer=False): r1 = r1 or d1/2 r2 = r2 or d2/2 fn = fn or get_circular_segments(max(r1,r2)) s = 1/np.cos(np.pi/fn) if outer else 1 return Solid(Manifold.cylinder( h, r1*s, r2*s, circular_segments=fn, center=center)) def sphere(d=1, r=None, fn=0): r = r or d/2 return Solid(Manifold.sphere(r, fn)) def circle(d=1, r=None, fn=0, outer=False): r = r or d/2 fn = fn or get_circular_segments(r) s = 1/np.cos(np.pi/fn) if outer else 1 return Shape(CrossSection.circle(r*s, fn)) def square(x=1, y=1, center=False): return Shape(CrossSection.square((x, y), center=center)) def polygon(points, fill_rule='even_odd'): if fill_rule == 'even_odd': fill_rule = manifold3d.FillRule.EvenOdd elif fill_rule == 'negative': fill_rule = manifold3d.FillRule.Negative elif fill_rule == 'non_zero': fill_rule = manifold3d.FillRule.NonZero elif fill_rule == 'positive': fill_rule = manifold3d.FillRule.Positive else: raise ValueError(f'{fill_rule=}') return Shape(CrossSection([points], fillrule=fill_rule)) def text(t, size=10, font="Helvetica", fn=8):
# wrapper for Manifold # adds jupyter preview & tweaks API class Solid: def __init__(self, manifold = Manifold()): self.manifold = manifold # TODO add visual properties (e.g. color, texture) def _repr_mimebundle_(self, **kwargs): if self.is_empty(): return None raw_mesh = self.to_mesh() verts = raw_mesh.vert_properties.astype(np.float32) tris = raw_mesh.tri_verts.astype(np.uint32) renderer = display((verts, tris)) return renderer._repr_mimebundle_(**kwargs) def __add__(self, other): return Solid(self.manifold + other.manifold) def __sub__(self, other): return Solid(self.manifold - other.manifold) def __and__(self, other): # manifold3d XOR is actually AND return Solid(self.manifold ^ other.manifold) def as_original(self): return Solid(self.manifold.as_original()) def bounding_box(self): return self.manifold.bounding_box() def calculate_curvature(self, gaussian_idx: int, mean_idx: int): return Solid(self.manifold.calculate_curvature(gaussian_idx, mean_idx)) def align(self, xmin=None, x=None, xmax=None, ymin=None, y=None, ymax=None, zmin=None, z=None, zmax=None): x0, y0, z0, x1, y1, z1 = self.bounding_box() dx, dy, dz = 0, 0, 0 if xmin is not None: dx = xmin-x0 if x is not None: dx = x-(x0+x1)/2 if xmax is not None: dx = xmax-x1 if ymin is not None: dy = ymin-y0 if y is not None: dy = y-(y0+y1)/2 if ymax is not None: dy = ymax-y1 if zmin is not None: dz = zmin-z0 if z is not None: dz = z-(z0+z1)/2 if zmax is not None: dz = zmax-z1 return self.move(dx, dy, dz) def decompose(self): return [Solid(m) for m in self.manifold.decompose()] def genus(self): return self.manifold.get_genus() def get_surface_area(self): return self.manifold.get_surface_area() def get_volume(self): return self.manifold.get_volume() def hull(self, *others): return Solid(Manifold.batch_hull([self.manifold, *[o.manifold for o in others]])) def is_empty(self): return self.manifold.is_empty() def mirror(self, x=0, y=0, z=0): return Solid(self.manifold.mirror((x, y, z))) def num_edge(self): return self.manifold.num_edge() def num_prop(self): return self.manifold.num_prop() def num_prop_vert(self): return self.manifold.num_prop_vert() def num_tri(self): return self.manifold.num_tri() def num_vert(self): return self.manifold.num_vert() def original_id(self): return self.manifold.original_id() def precision(self): return self.manifold.precision() def refine(self, n=2): return Solid(self.manifold.refine(n)) def rotate(self, x=0, y=0, z=0): return Solid(self.manifold.rotate((x, y, z))) def scale(self, x=1, y=1, z=1): return Solid(self.manifold.scale((x, y, z))) def set_properties(self, *args, **kwargs): raise ValueError("not implemented") def split(self, cutter): inter, diff = self.manifold.split(cutter) return Solid(inter), Solid(diff) def split_by_plane(self, x=0, y=0, z=0, offset=0): top, bottom = self.manifold.split_by_plane((x, y, z), offset) return Solid(top), Solid(bottom) def status(self): return self.manifold.status def to_mesh(self, normal_idx=[0,0,0]): return self.manifold.to_mesh(normal_idx) def transform(self, matrix): return Solid(self.manifold.transform(matrix)) def move(self, x=0, y=0, z=0): return Solid(self.manifold.translate((x,y,z))) def trim_by_plane(self, x=0, y=0, z=0, offset=0): return Solid(self.manifold.trim_by_plane((x, y, z), offset)) def warp(self, xyz_map_fn): return Solid(self.manifold.warp(xyz_map_fn)) def warp_batch(self, xyz_map_fn): return Solid(self.manifold.warp_batch(xyz_map_fn)) def refine_to_length(self, edge_len): m = self.manifold.to_mesh() verts = m.vert_properties.tolist() tris = 
m.tri_verts.tolist() mids = {} i = 0 while i < len(tris): tri = tris[i] v = [verts[i] for i in tri] dv = v - np.roll(v, 1, 0) lens = np.linalg.norm(dv, axis=-1) mi = np.argmax(lens) if lens[mi] > edge_len: key = (min(tri[mi],tri[mi-1]), max(tri[mi],tri[mi-1])) if key not in mids: mididx = len(verts) midv = [(v[mi][j] + v[mi-1][j])/2 for j in [0,1,2]] verts += [midv] mids[key] = mididx else: mididx = mids[key] tri2 = [*tri] tri2[mi-1] = mididx tris += [tri2] tri[mi] = mididx else: i += 1 verts = np.array(verts, np.float32) tris = np.array(tris, np.int32) m = manifold3d.Mesh(verts, tris, face_id=np.arange(len(tris))) return Solid(Manifold(m)) def stl(self, fname=None): mesh = self.to_mesh() tris = mesh.tri_verts.astype(np.uint32) verts = mesh.vert_properties.astype(np.float32) tnormals = triangle_normals(verts, tris) ntris = tris.shape[0] header = np.zeros(21, dtype=np.uint32) header[20] = ntris body = np.zeros((ntris, 50), dtype=np.uint8) body[:, 0:12] = tnormals.view(np.uint8) body[:, 12:24] = verts[tris[:,0]].view(np.int8) body[:, 24:36] = verts[tris[:,1]].view(np.int8) body[:, 36:48] = verts[tris[:,2]].view(np.int8) binary = header.tobytes() + body.tobytes() if fname: with open(fname, 'wb') as f: f.write(binary) return self else: return binary class Shape: def __init__(self, cross_section = CrossSection()): self.cross_section = cross_section def _repr_mimebundle_(self, **kwargs): # called by jupyter to figure out how to display this object # we create a scene on the fly with ability to customize # controls and lights, etc. return self.extrude(1e-9)._repr_mimebundle_(**kwargs) def __add__(self, other): return Shape(self.cross_section + other.cross_section) def __sub__(self, other): return Shape(self.cross_section - other.cross_section) def __and__(self, other): # manifold3d XOR is actually AND return Shape(self.cross_section ^ other.cross_section) def area(self): return self.cross_section.area() def bounds(self): return self.cross_section.bounds() def align(self, xmin=None, x=None, xmax=None, ymin=None, y=None, ymax=None): x0, y0, x1, y1 = self.bounds() dx, dy = 0, 0 if xmin is not None: dx = xmin-x0 if x is not None: dx = x-(x0+x1)/2 if xmax is not None: dx = xmax-x1 if ymin is not None: dy = ymin-y0 if y is not None: dy = y-(y0+y1)/2 if ymax is not None: dy = ymax-y1 return self.move(dx, dy) def decompose(self): return [Shape(p) for p in self.cross_section.decompose()] def extrude(self, height, fn=0, twist=0, scale_top=(1,1), center=False): s = Solid(self.cross_section.extrude( height, n_divisions=fn, twist_degrees=twist, scale_top=scale_top, )) return s.move(z=-height/2) if center else s def extrude_to(self, other, height, center=False): polys1 = self.to_polygons() assert len(polys1) == 1, 'extrude_to only supports simple polygons' verts1 = np.pad(polys1[0], [[0,0],[0,1]], constant_values=0) N1 = verts1.shape[0] polys2 = other.to_polygons() assert len(polys2) == 1, 'extrude_to only supports simple polygons' verts2 = np.pad(polys2[0], [[0,0],[0,1]], constant_values=height) # flip the bottom over tris1 = manifold3d.triangulate(polys1) tmp = tris1[:, 1].copy() tris1[:, 1] = tris1[:, 2] tris1[:, 2] = tmp # offset top vertex indices tris2 = manifold3d.triangulate(polys2) tris2 += N1 alignment = polygon_nearest_alignment(verts1, verts2) alignment = [(a, b+N1) for a, b in alignment] # build the skirt faces tris3 = [] for s in range(len(alignment)): i, j = alignment[s] pi, pj = alignment[s-1] if i != pi: tris3 += [[pi, i, pj]] if j != pj: tris3 += [[i, j, pj]] tris3 = np.array(tris3) verts = 
np.concatenate((verts1, verts2)) tris = np.concatenate((tris1, tris2, tris3)) mesh = manifold3d.Mesh(verts, tris) s = Solid(Manifold(mesh)) return s.move(z=-height/2) if center else s def hull(self, *others): return Shape(CrossSection.batch_hull([self.cross_section, *[o.cross_section for o in others]])) def is_empty(self): return self.cross_section.is_empty() def mirror(self, x=0, y=0): return Shape(self.cross_section.mirror((x, y))) def num_contour(self): return self.cross_section.num_contour() def num_vert(self): return self.cross_section.num_vert() def offset(self, delta, join_type='miter', miter_limit=2, circular_segments=0): if join_type == 'round': join_type = manifold3d.JoinType.Round elif join_type == 'miter': join_type = manifold3d.JoinType.Miter elif join_type == 'square': join_type = manifold3d.JoinType.Square else: raise ValueError(f'{join_type=}') return Shape(self.cross_section.offset( delta, join_type, miter_limit, circular_segments )) def revolve(self, z=360, fn=0): return Solid(self.cross_section.revolve( circular_segments=fn, revolve_degrees=z, )) def rotate(self, z): return Shape(self.cross_section.rotate(z)) def scale(self, x=1, y=1): return Shape(self.cross_section.scale((x, y))) def simplify(self, eps): return Shape(self.cross_section.simplify(eps)) def to_polygons(self): return self.cross_section.to_polygons() def transform(self, matrix): return Shape(self.cross_section.transform(matrix)) def move(self, x=0, y=0): return Shape(self.cross_section.translate((x,y))) def warp(self, xy_map_func): return Shape(self.cross_section.warp(xy_map_func)) def warp_batch(self, xy_map_func): return Shape(self.cross_section.warp_batch(xy_map_func)) def get_circular_segments(radius): return manifold3d.get_circular_segments(radius) def set_circular_segments(nseg): manifold3d.set_circular_segments(nseg) def set_min_circular_angle(degrees): manifold3d.set_min_circular_angle(degrees) def set_min_circular_edge_length(length): manifold3d.set_min_circular_edge_length(length) def hull(*solids): mans = [s.manifold for s in solids] return Solid(Manifold.batch_hull(mans)) def hull_points(points): return Shape(Manifold.hull_points(points)) def hull2d(*shapes): sects = [s.cross_section for s in shapes] return Shape(CrossSection.batch_hull(sects)) def hull2d_points(points): return Shape(CrossSection.hull_points(points)) def cube(x=1, y=1, z=1, center=False): return Solid(Manifold.cube((x, y, z), center=center)) def cylinder(h=1, d=1, r=None, center=False, fn=0, outer=False): r = r or d/2 fn = fn or get_circular_segments(r) s = 1/np.cos(np.pi/fn) if outer else 1 return Solid(Manifold.cylinder( h, r*s, r*s, circular_segments=fn, center=center)) def conic(h=1, d1=1, d2=1, r1=None, r2=None, center=False, fn=0, outer=False): r1 = r1 or d1/2 r2 = r2 or d2/2 fn = fn or get_circular_segments(max(r1,r2)) s = 1/np.cos(np.pi/fn) if outer else 1 return Solid(Manifold.cylinder( h, r1*s, r2*s, circular_segments=fn, center=center)) def sphere(d=1, r=None, fn=0): r = r or d/2 return Solid(Manifold.sphere(r, fn)) def circle(d=1, r=None, fn=0, outer=False): r = r or d/2 fn = fn or get_circular_segments(r) s = 1/np.cos(np.pi/fn) if outer else 1 return Shape(CrossSection.circle(r*s, fn)) def square(x=1, y=1, center=False): return Shape(CrossSection.square((x, y), center=center)) def polygon(points, fill_rule='even_odd'): if fill_rule == 'even_odd': fill_rule = manifold3d.FillRule.EvenOdd elif fill_rule == 'negative': fill_rule = manifold3d.FillRule.Negative elif fill_rule == 'non_zero': fill_rule = 
manifold3d.FillRule.NonZero elif fill_rule == 'positive': fill_rule = manifold3d.FillRule.Positive else: raise ValueError(f'{fill_rule=}') return Shape(CrossSection([points], fillrule=fill_rule)) def text(t, size=10, font="Helvetica", fn=8):
polys = svg2polygons(text2svg(t, size=size, font=font), fn=fn)
4
2023-12-11 01:48:22+00:00
8k
PeriniM/Rotary-Pendulum-RL
control/pid/src/main_energy.py
[ { "identifier": "RealPendulumEnv", "path": "control/reinforcement_learning/Environments/RealPendulumEnv.py", "snippet": "class RealPendulumEnv(gym.Env):\n \"\"\"\n Real rotary pendulum with ESP32\n \"\"\"\n\n metadata = {\"render_modes\": [\"human\"]}\n\n def __init__(self, port, baudrate, render_mode=\"human\"):\n super(RealPendulumEnv, self).__init__()\n \"\"\"\n Initialize the environment.\n \n Args:\n port (str): The serial port to connect to.\n baudrate (int): The baudrate to use for the serial connection.\n render_mode (str, optional): The render mode. Defaults to \"human\".\n\n Returns:\n None\n \"\"\"\n\n self.ser = serial.Serial(\n port=port,\n baudrate=baudrate,\n parity=serial.PARITY_NONE,\n stopbits=serial.STOPBITS_ONE,\n bytesize=serial.EIGHTBITS,\n timeout=1\n )\n self.reader = SerialReader(self.ser, simulation=False)\n self.reader.start()\n self.render_mode = render_mode\n self.name = \"RealPendulum\"\n self.nbJoint = 1\n self.num_state = 2\n self.action = 0.0\n self.motorAngle = 0.0\n self.terminated = False\n self.truncated = False\n self.iterCount = 0\n self.maxIter = 1000\n self.omega_max = 10.0\n self.range_actions = np.array([-1.0, 1.0])\n self.range_observation = np.array([-1.0, 1.0])\n self.observation_space = spaces.Box(low=self.range_observation[0], high=self.range_observation[1], shape=(self.num_state,), dtype=np.float32)\n self.action_space = spaces.Box(low=self.range_actions[0], high=self.range_actions[1], shape=(1,), dtype=np.float32)\n # variable to store angles of one episode\n self.episode_angles = []\n \n def reset(self, seed=None, options=None):\n \"\"\"\n Reset the environment to the initial state.\n\n Args:\n None\n\n Returns:\n state (np.array): [bar angle, bar angular velocity]\n info (dict): Episode information\n \"\"\"\n\n super().reset(seed=seed, options=options)\n\n # Reset the episode angles\n self.episode_angles = []\n\n # Send command to pendulum to go to home position.\n self.send_serial(\"0,1\")\n # Wait for the pendulum to report it has finished resetting.\n while (1):\n self.observation_space, self.motorAngle, self.terminated = self.reader.get_state()\n if not self.terminated:\n break\n\n # Reset iteration count\n self.iterCount = 0\n self.info = {\"episode\": {\"r\": 0.0, \"l\": self.iterCount}}\n\n return self.observation_space.astype(np.float32), self.info\n \n def step(self, action):\n \"\"\"\n Take a step in the environment\n\n Args:\n action (float): Motor speed percentage [-100, 100]\n\n Returns:\n state (np.array): [bar angle, bar angular velocity]\n reward (float): Reward for the current state\n terminated (bool): Whether the episode is done or not\n truncated (bool): Whether the episode is truncated or not\n info (dict): Episode information\n \"\"\"\n\n # Send action to pendulum over serial\n self.send_serial(f\"{action*100},0\")\n self.action = action\n # Read state and episode done flag from serial\n self.observation_space, self.motorAngle, self.terminated = self.reader.get_state()\n\n # Store the angles of the episode for reward penalty\n self.episode_angles.append(self.state[0])\n \n # Calculate reward\n reward = self.calculate_reward(self.observation_space)\n self.episode_reward += reward\n self.iterCount += 1\n self.reset_policy(self.maxIter)\n self.info = {\"episode\": {\"r\": self.episode_reward, \"l\": self.iterCount}}\n\n return self.observation_space.astype(np.float32), reward, self.terminated, self.truncated, self.info\n\n def send_serial(self, command):\n \"\"\"\n Send a command to the pendulum over serial\n\n 
Args:\n command (str): [motor speed percentage, reset flag]\n\n Returns:\n None\n \"\"\"\n\n self.ser.write(f\"{command}\\n\".encode())\n # time.sleep(0.1)\n \n def reset_policy(self, reset_count=200):\n \"\"\"\n Policy to reset the environment\n\n Args:\n reset_count (int, optional): Number of iterations to wait before resetting the system. Defaults to 200.\n \n Returns:\n None\n \"\"\"\n\n if self.iterCount > reset_count:\n self.terminated = True\n \n def calculate_reward(self, state):\n \"\"\"\n Calculate the reward for the current state\n\n Args:\n state (np.array): [bar angle, bar angular velocity]\n\n Returns:\n reward (float): Reward for the current state\n \"\"\"\n\n # Constants to scale the angle and velocity penalties\n ANGLE_WEIGHT = 1.0\n VELOCITY_WEIGHT = 0.1\n MOTOR_ANGLE_WEIGHT = 1.0\n ACTION_WEIGHT = 0.01\n\n # Penalize the angle to be minimized\n angle_penalty = ANGLE_WEIGHT * (state[0] ** 2)\n # Penalize the angular velocity to be minimized\n velocity_penalty = VELOCITY_WEIGHT * (state[1] ** 2)\n\n # Penalize the motor angle to be minimized\n motor_angle = self.motorAngle / 180.0\n motor_angle_penalty = MOTOR_ANGLE_WEIGHT * (motor_angle ** 2)\n\n # Penalize the action to be minimized\n action_penalty = ACTION_WEIGHT * (self.action ** 2)\n\n # Reward is higher when penalties are lower\n reward = -(angle_penalty + velocity_penalty + motor_angle_penalty + action_penalty)\n\n # Penalize the reward if the average angle of the episode is close to pi\n # after 3/4 of the maximum iterations\n if self.iterCount > self.maxIter*3/4:\n if np.abs(np.mean(self.episode_angles)) < (np.pi-0.8):\n reward-=100.0\n # if self.terminated:\n # if self.iterCount < self.maxIter*1/10:\n # reward-=100.0\n return reward\n\n def render(self, camera=False):\n \"\"\"\n Render the state (optional), e.g. display the video stream\n \"\"\"\n if camera:\n print(\"Connect the camera to the pendulum and display the video stream.\")\n\n def close(self):\n \"\"\"\n Close the serial connection\n\n Args:\n None\n\n Returns:\n None\n \"\"\"\n\n self.ser.close()" }, { "identifier": "PyBulletPendulumEnv", "path": "control/reinforcement_learning/Environments/PyBulletPendulumEnv.py", "snippet": "class PyBulletPendulumEnv(gym.Env):\n \"\"\"\n PyBullet Rotary Pendulum\n \"\"\"\n\n metadata = {\"render_modes\": [\"human\"]}\n\n def __init__(self, render_mode=\"human\"):\n super(PyBulletPendulumEnv, self).__init__()\n \"\"\"\n Initialize the PyBullet Rotary Pendulum environment\n\n Args:\n render (bool, optional): Whether to render the environment. 
Defaults to True.\n\n Returns:\n None\n \"\"\"\n\n self.render_mode = render_mode\n # Initialize PyBullet\n if render_mode == \"human\":\n self.physicsClient = p.connect(p.GUI)\n else:\n self.physicsClient = p.connect(p.DIRECT)\n\n p.setAdditionalSearchPath(pybullet_data.getDataPath())\n p.setGravity(0, 0, -9.806)\n # move camera to focus on the robot\n p.resetDebugVisualizerCamera(cameraDistance=0.4, cameraYaw=0, cameraPitch=-30, cameraTargetPosition=[0,0,0.1])\n # Load the plane and pendulum URDF\n self.planeId = p.loadURDF(\"plane.urdf\")\n self.load_pendulum_urdf()\n\n # Define other environment parameters\n self.name = \"PyBulletPendulum\"\n self.nbJoint = 1\n self.num_state = 2\n self.action = 0.0\n self.n_actions = 101\n self.range_actions = np.array([-1.0, 1.0])\n self.range_observation = np.array([-1.0, 1.0])\n self.observation_space = spaces.Box(low=self.range_observation[0], high=self.range_observation[1], shape=(self.num_state,), dtype=np.float32)\n self.action_space = spaces.Box(low=self.range_actions[0], high=self.range_actions[1], shape=(1,), dtype=np.float32)\n self.motorAngle = 0.0\n self.terminated = False\n self.truncated = False\n self.info = {}\n self.iterCount = 0\n self.maxIter = 1500\n self.omega_max = 10.0\n self.episode_reward = 0.0\n \n # variable to store angles of one episode\n self.episode_angles = []\n\n def load_pendulum_urdf(self):\n \"\"\"\n Load the pendulum URDF into the environment.\n\n Args:\n None\n\n Returns:\n None\n \"\"\"\n\n cubeStartPos = [0, 0, 0]\n cubeStartOrientation = p.getQuaternionFromEuler([np.pi / 2, 0, 0])\n curr_dir = os.path.abspath(os.path.dirname(__file__))\n robot_urdf = 'Rotary_Pendulum_URDF.urdf'\n # Construct the path to the URDF file\n urdf_path = os.path.join(curr_dir, '..', '..', '..', 'simulation', 'urdf', robot_urdf)\n self.robotId = p.loadURDF(urdf_path, cubeStartPos, cubeStartOrientation,\n # flags=p.URDF_USE_INERTIA_FROM_FILE,\n useFixedBase=True\n )\n\n # Define joint indices as per your URDF structure\n self.motor_joint_idx = [p.getJointInfo(self.robotId, i)[1] for i in range(p.getNumJoints(self.robotId))].index(b'Revolute_3')\n self.bar_joint_idx = [p.getJointInfo(self.robotId, i)[1] for i in range(p.getNumJoints(self.robotId))].index(b'Revolute_5')\n\n # Define real robot parameters\n self.steps_per_rev = 3200\n self.max_speed_steps_per_sec = 4000.0\n # Calculate radians per step\n self.radians_per_step = (2 * np.pi) / self.steps_per_rev\n # Calculate max speed in radians per second [rad/s]\n self.max_motor_speed = self.max_speed_steps_per_sec * self.radians_per_step\n # Admissible motor angle range [deg]\n self.motor_angle_range = [-150, 150]\n self.out_of_range = False\n\n # Compensation angles for the URDF\n self.motor_compensation_angle = 0.400\n self.bar_compensation_angle = -0.264\n\n def reset(self, seed=None, options=None):\n \"\"\"\n Reset the environment to a random state\n\n Args:\n None\n\n Returns:\n state (np.array): [bar_angle, bar_angular_velocity]\n \"\"\"\n\n super().reset(seed=seed, options=options)\n # Reset the episode angles\n self.episode_angles = []\n self.episode_reward = 0.0\n self.terminated = False\n # Send command to pendulum to reset to random position\n self.send_fake_serial([0, 1])\n\n # get the state from the pendulum\n self.observation_space, self.motorAngle, self.terminated = self.get_state()\n\n # Reset iteration count\n self.iterCount = 0\n self.info = {\"episode\": {\"r\": 0.0, \"l\": self.iterCount}}\n \n return self.observation_space.astype(np.float32), self.info\n \n def 
step(self, action):\n \"\"\"\n Take a step in the environment\n\n Args:\n action (float): Motor speed percentage [-100, 100]\n\n Returns:\n state (np.array): [bar angle, bar angular velocity]\n \"\"\"\n\n # multiply the action by 100 to get the percentage\n self.action = action*100.0\n # Send action to pendulum over serial\n self.send_fake_serial([self.action, 0])\n # Read state and episode done flag from serial\n self.observation_space, self.motorAngle, self.terminated = self.get_state()\n\n # Store the angles of the episode for reward penalty\n self.episode_angles.append(self.observation_space[0])\n \n # Calculate reward\n reward = self.calculate_reward(self.observation_space)\n self.episode_reward += reward\n self.iterCount += 1\n self.reset_policy(self.maxIter)\n self.info = {\"episode\": {\"r\": self.episode_reward, \"l\": self.iterCount}}\n\n # return normalized_state, reward, self.done\n return self.observation_space.astype(np.float32), reward, self.terminated, self.truncated, self.info\n \n def calculate_reward(self, state):\n \"\"\"\n Calculate the reward for the current state\n\n Args:\n state (np.array): [bar angle, bar angular velocity]\n\n Returns:\n reward (float): Reward for the current state\n \"\"\"\n\n # Constants to scale the bar and motor angle penalties\n ANGLE_WEIGHT = 1.0\n VELOCITY_WEIGHT = 0.1\n MOTOR_ANGLE_WEIGHT = 0.001\n ACTION_WEIGHT = 0.001\n\n # Calculate the angle penalty\n angle_penalty = ANGLE_WEIGHT * (state[0]) ** 2\n # Calculate the velocity penalty\n velocity_penalty = VELOCITY_WEIGHT * (state[1]) ** 2\n # Calculate the motor angle penalty\n # motor_angle_penalty = MOTOR_ANGLE_WEIGHT * (self.motorAngle/self.motor_angle_range[1]) ** 2\n # Calculate the action penalty\n action_penalty = ACTION_WEIGHT * (self.action/100) ** 2\n\n # Calculate the reward\n reward = - (angle_penalty + velocity_penalty)\n\n # NEW REWARD FUNCTION\n # reward range [-1, 0]\n # angle_target = 0.0\n # angular_velocity_target = 0.0\n # motor_angle_target = 0.0\n\n # reward = -1/2 * (np.abs(state[0] - angle_target)/np.pi + np.abs(self.motorAngle - motor_angle_target)/self.motor_angle_range[1])\n # reward = - 1/2 * (np.abs(state[0] - angle_target) + np.abs(state[1] - angular_velocity_target))\n # if the episode is done with enough iterations\n # if self.iterCount > int(self.maxIter/2) and self.done:\n # # if the average of the bar angles is less than 90 degrees\n # if np.abs(np.mean(self.episode_angles)) < np.deg2rad(90):\n # reward += 100.0\n\n # if the episode is done with not enough iterations\n # if self.iterCount < int(self.maxIter/10) and self.terminated:\n # # if the motor angle is out of range\n # if self.out_of_range:\n # reward -= 2000.0\n \n return reward\n \n def reset_policy(self, reset_count=200):\n \"\"\"\n Policy to reset the environment\n\n Args:\n reset_count (int, optional): Number of iterations to wait before resetting the system. 
Defaults to 200.\n \n Returns:\n None\n \"\"\"\n\n if self.iterCount >= reset_count:\n self.terminated = True\n\n def send_fake_serial(self, command):\n \"\"\"\n Send a command to the pendulum, simulating a fake serial connection\n\n Args:\n command (list): [motor speed percentage, episode done flag]\n\n Returns:\n None\n \"\"\"\n\n motor_speed_percentage = command[0]\n episode_done = command[1]\n\n if episode_done:\n self.terminated = True\n self.reset_robot(mode=\"random\")\n else:\n self.terminated = False\n # Calculate the motor speed in steps per second\n motor_speed = motor_speed_percentage * self.max_motor_speed / 100.0\n # set the motor velocity\n p.setJointMotorControl2(bodyUniqueId=self.robotId,\n jointIndex=self.motor_joint_idx,\n controlMode=p.VELOCITY_CONTROL,\n targetVelocity=motor_speed,\n )\n\n # time.sleep(0.1)\n \n def get_state(self):\n \"\"\"\n Read the state from the pendulum, simulating a fake serial connection\n\n Args:\n None\n\n Returns:\n state (np.array): [bar angle, bar angular velocity]\n motor_angle (float): Motor angle in degrees\n done (bool): Episode done flag\n \"\"\"\n\n # Get the bar angle\n bar_angle = p.getJointState(self.robotId, self.bar_joint_idx)[0] + self.bar_compensation_angle\n # Get bar angular velocity\n bar_angular_velocity = p.getJointState(self.robotId, self.bar_joint_idx)[1]\n # Get the motor angle\n motor_angle = np.rad2deg(p.getJointState(self.robotId, self.motor_joint_idx)[0] + self.motor_compensation_angle)\n\n # Map the motor angle to the correct range\n if motor_angle > self.motor_angle_range[1] or motor_angle < self.motor_angle_range[0]:\n self.out_of_range = True\n else:\n self.out_of_range = False\n \n # Adjusting the bar angle to map correctly\n bar_angle = bar_angle % (2 * np.pi) # Normalize the angle to be within 0 to 2π\n if bar_angle > np.pi:\n bar_angle -= 2 * np.pi # Adjust angles greater than π to be between -π to π\n \n if bar_angle > 0:\n bar_angle = np.pi - bar_angle\n elif bar_angle < 0:\n bar_angle = -np.pi - bar_angle\n\n # round the states to 4 decimal places\n bar_angle = round(bar_angle/np.pi, 4)\n bar_angular_velocity = round(bar_angular_velocity/self.omega_max, 4)\n motor_angle = round(motor_angle, 4)\n\n return np.array([bar_angle, bar_angular_velocity]), motor_angle, self.out_of_range\n \n def reset_robot(self, mode=\"random\"):\n \"\"\"\n Reset the robot state\n\n Args:\n mode (str, optional): Mode to reset the robot. 
Defaults to \"random\".\n\n Returns:\n state (np.array): [bar angle, bar angular velocity]\n \"\"\"\n\n if mode == \"random\":\n # Reset the robot to a random position\n bar_angle = np.random.uniform(-np.pi, np.pi)\n bar_angular_velocity = np.random.uniform(-self.omega_max, self.omega_max)\n motor_angle = np.deg2rad(np.random.uniform(self.motor_angle_range[0], self.motor_angle_range[1]))\n\n # Set the robot to the random position\n p.resetJointState(self.robotId, self.bar_joint_idx, targetValue=bar_angle)\n # p.resetJointState(self.robotId, self.motor_joint_idx, targetValue=motor_angle)\n p.resetJointState(self.robotId, self.motor_joint_idx, targetValue=-self.motor_compensation_angle)\n # set bar velocity with no force\n p.setJointMotorControl2(bodyUniqueId=self.robotId,\n jointIndex=self.bar_joint_idx,\n controlMode=p.VELOCITY_CONTROL,\n targetVelocity=bar_angular_velocity,\n force=0\n )\n elif mode == \"home\":\n # Reset the robot to the home position\n p.resetJointState(self.robotId, self.bar_joint_idx, targetValue=-self.bar_compensation_angle)\n p.resetJointState(self.robotId, self.motor_joint_idx, targetValue=-self.motor_compensation_angle)\n\n # set bar velocity with no force\n p.setJointMotorControl2(bodyUniqueId=self.robotId,\n jointIndex=self.bar_joint_idx,\n controlMode=p.VELOCITY_CONTROL,\n targetVelocity=0,\n force=0\n )\n \n return self.get_state()[0]\n \n def render(self, fps=240.0):\n \"\"\"\n Render the pendulum in PyBullet\n\n Args:\n fps (float, optional): Number of frames per second. Defaults to 240.0.\n\n Returns:\n None\n \"\"\"\n p.stepSimulation()\n if self.render_mode == \"human\":\n time.sleep(1./fps)\n \n def close(self):\n \"\"\"\n Close the PyBullet connection\n\n Args:\n None\n\n Returns:\n None\n \"\"\"\n p.disconnect()" }, { "identifier": "EnergyController", "path": "control/pid/classes/EnergyController.py", "snippet": "class EnergyController:\r\n \"\"\"\r\n Speed-Based Controller class for Furuta Pendulum\r\n \"\"\"\r\n def __init__(self, upright_threshold=0.2, length=0.02):\r\n \"\"\"\r\n Initialize Speed-Based controller\r\n\r\n Args:\r\n upright_threshold (float): Threshold for switching to proportional control near the upright position\r\n length (float): Length of the pendulum arm\r\n inertia (float): Inertia of the pendulum\r\n \"\"\"\r\n\r\n self.control_input = 0 # Initialize motor speed control input\r\n self.upright_threshold = upright_threshold\r\n self.length = length\r\n self.bar_mass = self.calculate_bar_mass(0.006, self.length)\r\n self.inertia = self.calculate_polar_inertia(self.bar_mass, self.length)\r\n self.gravity = 9.806\r\n self.k_energy = 200\r\n\r\n def calculate_bar_mass(self, radius, height):\r\n \r\n \"\"\"\r\n Calculate the mass of a steel cylinder\r\n\r\n Args:\r\n radius (float): Radius of the cylinder\r\n height (float): Height of the cylinder\r\n\r\n Returns:\r\n mass (float): Mass of the cylinder\r\n \"\"\"\r\n\r\n density = 7850 # Density of steel in kg/m^3\r\n mass = density * np.pi * radius**2 * height # Volume of cylinder * density [kg]\r\n \r\n return mass\r\n\r\n def calculate_polar_inertia(self, mass, length):\r\n \"\"\"\r\n Calculate the polar moment of inertia of a thin rod about its pivot\r\n\r\n Args:\r\n mass (float): Mass of the rod\r\n length (float): Length of the rod\r\n\r\n Returns:\r\n polar_inertia (float): Polar moment of inertia of the rod about its pivot\r\n \"\"\"\r\n\r\n # Calculate moment of inertia about the pivot of the rod\r\n inertia_pivot = (1 / 12) * mass * length**2\r\n\r\n return 
inertia_pivot\r\n \r\n def total_energy(self, angle, angular_velocity):\r\n \"\"\"\r\n Calculate the total energy of the system\r\n\r\n Args:\r\n angle (float): Angle of the bar, in the range [-pi, pi]\r\n angular_velocity (float): Angular velocity of the bar, in the range [-10, 10]\r\n\r\n Returns:\r\n total_energy (float): Total energy of the system\r\n \"\"\"\r\n\r\n # Compute potential energy\r\n potential_energy = self.bar_mass * self.gravity * (self.length/2) * (1 - np.cos(angle))\r\n\r\n # Compute kinetic energy\r\n kinetic_energy = 0.5 * self.inertia * angular_velocity**2\r\n\r\n # Calculate total energy as the sum of potential and kinetic energy\r\n total_energy = potential_energy + kinetic_energy\r\n\r\n return total_energy\r\n \r\n def control(self, angle, angular_velocity):\r\n \"\"\"\r\n Compute Speed-Based control signal\r\n\r\n Args:\r\n angle (float): Angle of the bar, in the range [-pi, pi]\r\n angular_velocity (float): Angular velocity of the bar, in the range [-10, 10]\r\n\r\n Returns:\r\n control_input (float): Motor speed control input\r\n \"\"\"\r\n\r\n # Calculate reference energy\r\n reference_energy = self.total_energy(0, 0)\r\n\r\n # if np.abs(angle) > self.upright_threshold:\r\n # Calculate energy error\r\n energy_error = self.total_energy(angle, angular_velocity) - reference_energy\r\n\r\n # Calculate control input based on the paper's formulation, \r\n self.control_input = self.k_energy * energy_error * 100 * np.sign(angular_velocity * np.cos(angle))\r\n\r\n # else:\r\n # # use proportional control near the upright position\r\n # self.control_input = 100 * angle\r\n \r\n self.control_input = np.clip(self.control_input, -100, 100)\r\n\r\n return self.control_input\r\n\r\n def reset(self):\r\n \"\"\"\r\n Reset Speed-Based controller\r\n \"\"\"\r\n self.control_input = 0\r" } ]
from ...reinforcement_learning.Environments import RealPendulumEnv as real from ...reinforcement_learning.Environments import PyBulletPendulumEnv as pybullet from ..classes.EnergyController import EnergyController import numpy as np import time
6,298
real_pendulum = False # Example usage energy_controller = EnergyController() K_motor = 0.0 desired_bar_angle = 0 desired_bar_velocity = 0 desired_motor_angle = 0 if real_pendulum: # initialize RealPendulum environment env = real.RealPendulumEnv("COM3", 115200) else: # initialize PyBulletPendulum environment
real_pendulum = False # Example usage energy_controller = EnergyController() K_motor = 0.0 desired_bar_angle = 0 desired_bar_velocity = 0 desired_motor_angle = 0 if real_pendulum: # initialize RealPendulum environment env = real.RealPendulumEnv("COM3", 115200) else: # initialize PyBulletPendulum environment
env = pybullet.PyBulletPendulumEnv(render_mode='human')
2
2023-12-09 11:22:54+00:00
8k
JayYik/GAN_implementations
utils/get_model.py
[ { "identifier": "DCGAN", "path": "models/DCGAN.py", "snippet": "class DCGAN(nn.Module):\n def __init__(self, args):\n super(DCGAN, self).__init__()\n self.G=DCGAN_G(args.hw,args.z_dim,args.in_channels)\n self.D=DCGAN_D(args.hw,args.in_channels)\n # self.G.weight_init()\n # self.D.weight_init()\n\n self.args=args\n self.batch_size=args.batch_size\n self.z_dim=args.z_dim\n self.bce_loss = nn.BCELoss()\n\n self.optim_g = optim.Adam(self.G.parameters(), lr=args.lr_g,betas=args.betas)\n self.optim_d = optim.Adam(self.D.parameters(), lr=args.lr_d,betas=args.betas)\n self.scheduler_optim_g=optim.lr_scheduler.MultiStepLR(self.optim_g, milestones=[100,150], gamma=0.9)\n self.scheduler_optim_d=optim.lr_scheduler.LambdaLR(self.optim_d, lr_lambda=self.warm_up)\n \n # Recording program start time for log directory naming\n program_begin_time = t.strftime('%Y-%m-%d %H:%M', t.localtime())\n # Logging information\n self.information=f'DCGAN-{program_begin_time}'\n # TensorBoard SummaryWriter for logging\n self.writer=SummaryWriter(os.path.join(self.args.log_dir,self.information))\n\n def warm_up(self,epoch):\n \"\"\"\n Learning rate warm-up function for the Adam optimizer.\n\n Args:\n epoch (int): Current epoch number.\n\n Returns:\n float: Adjusted learning rate based on the warm-up strategy.\n \"\"\"\n top_epoch = int(self.args.num_epochs*0.3)\n if epoch<top_epoch:\n #In the first 30% of epochs, slowly increase the LR to the preset LR\n return (epoch+1) / top_epoch\n else:\n #Drop the LR to half of the preset\n return (1 -( 0.5 / (self.args.num_epochs - top_epoch) * (epoch - top_epoch) ) )\n\n def save_model(self,epoch):\n save_path=f'./save/{self.information}'\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n torch.save(self.G.state_dict(), f'{save_path}/generator_{epoch}epochs.pth')\n torch.save(self.D.state_dict(), f'{save_path}/discriminator_{epoch}epochs.pth')\n self.save_args(save_path)\n print(f'Models save to {save_path}/generator_{epoch}epochs.pth & {save_path}/discriminator_{epoch}epochs.pth ')\n\n def save_args(self,save_path):\n argsDict = self.args.__dict__\n with open(f'{save_path}/setting.txt', 'w') as f:\n f.writelines('------------------ start ------------------' + '\\n')\n for eachArg, value in argsDict.items():\n f.writelines(eachArg + ' : ' + str(value) + '\\n')\n f.writelines('------------------- end -------------------')\n\n\n def train(self,train_loader,device):\n \"\"\"\n Training function for the DCGAN model.\n\n Args:\n train_loader (DataLoader): DataLoader for training data.\n device (torch.device): The device (CPU or GPU) to perform training.\n\n Returns:\n None\n \"\"\"\n\n # Move the model and loss to the specified device\n self.G.to(device)\n self.D.to(device)\n self.bce_loss.to(device)\n generator_iter = 0\n descriminator_iter = 0\n \n # Training loop\n for epoch in range(self.args.num_epochs):\n self.t_begin = t.time()\n pbar=tqdm(enumerate(train_loader),total=len(train_loader),ncols=100)\n\n for i, (images, _) in pbar:\n if i == train_loader.dataset.__len__() // self.batch_size:\n break\n\n # Generate random noise and labels\n z = torch.randn((self.batch_size, self.z_dim, 1, 1))\n real_labels = torch.ones(self.batch_size)\n fake_labels = torch.zeros(self.batch_size)\n\n # Move data to the specified device\n images=images.to(device)\n z=z.to(device)\n real_labels=real_labels.to(device)\n fake_labels=fake_labels.to(device)\n\n # Train Discriminator\n real_output = self.D(images)\n #print('real_output:',real_output)\n fake_images = self.G(z)\n fake_output = 
self.D(fake_images)\n d_real_loss = self.bce_loss(real_output.flatten(), real_labels)\n d_fake_loss = self.bce_loss(fake_output.flatten(), fake_labels)\n #print('real_loss:',d_real_loss.item(),' fake_loss:',d_fake_loss.item())\n d_loss = d_real_loss + d_fake_loss\n self.D.zero_grad()\n d_loss.backward()\n self.writer.add_scalar('D_loss', d_loss.item(), descriminator_iter)\n self.optim_d.step()\n descriminator_iter+=1\n\n # Train Generator\n if i % self.args.des_iter == 0:\n #print(\"i:\",i)\n self.D.zero_grad()\n self.G.zero_grad()\n\n z = torch.randn((self.batch_size, self.z_dim, 1, 1))\n z = z.to(device)\n\n fake_images = self.G(z)\n fake_output = self.D(fake_images)\n fake_score = fake_output.squeeze().mean()\n #print('fake_output:',fake_output)\n g_loss = self.bce_loss(fake_output.flatten(), real_labels)\n g_loss.backward()\n pbar.set_postfix({'G_loss': g_loss.item(),'D_loss': d_loss.item(),'fake_socre':fake_score.item()})\n #print('g_loss:',g_loss.item())\n self.optim_g.step()\n self.writer.add_scalar('G_loss', g_loss.item(), generator_iter)\n generator_iter+=1\n \n # Save generated images\n if generator_iter % 500 == 0:\n \n if not os.path.exists(f'./training_result_{self.args.dataset}-{self.information}/'):\n os.makedirs(f'./training_result_{self.args.dataset}-{self.information}/')\n\n z = torch.randn((self.batch_size,self.args.z_dim, 1, 1))\n z=z.to(device)\n samples = self.G(z)\n samples = samples.mul(0.5).add(0.5)\n samples = samples.data.cpu()[:25]\n grid = torchvision.utils.make_grid(samples,nrow=5)\n torchvision.utils.save_image(grid, './training_result_{}/img_generatori_iter_{}.png'.format(self.args.dataset+'-'+self.information,str(generator_iter).zfill(3)))\n \n # Learning rate scheduling\n self.scheduler_optim_d.step()\n self.scheduler_optim_g.step()\n\n # Print and log training information\n print(self.optim_d.state_dict()['param_groups'][0]['lr'])\n print(self.optim_g.state_dict()['param_groups'][0]['lr'])\n self.t_end = t.time()\n print(\n \"[Epoch %d/%d] [D loss: %f] [G loss: %f] [training time: %.3fseconds]\"\n % (epoch, self.args.num_epochs, d_loss.item(), g_loss.item() , (self.t_end - self.t_begin))\n )\n\n # Save the trained parameters\n if epoch % (self.args.num_epochs // 5) == 0 and epoch !=0:\n self.save_model(epoch)" }, { "identifier": "GAN", "path": "models/GAN.py", "snippet": "class GAN(nn.Module):\n def __init__(self, args):\n super(GAN, self).__init__()\n self.G=GAN_G(args.hw,args.z_dim,args.in_channels)\n self.D=GAN_D(args.hw,args.in_channels)\n\n\n self.args=args\n self.batch_size=args.batch_size\n self.z_dim=args.z_dim\n self.bce_loss = nn.BCELoss()\n\n self.optim_g = optim.Adam(self.G.parameters(), lr=args.lr_g,betas=args.betas)\n self.optim_d = optim.Adam(self.D.parameters(), lr=args.lr_d,betas=args.betas)\n\n \n # Recording program start time for log directory naming\n program_begin_time = t.strftime('%Y-%m-%d %H:%M', t.localtime())\n # Logging information\n self.information=f'GAN-{program_begin_time}'\n # TensorBoard SummaryWriter for logging\n self.writer=SummaryWriter(os.path.join(self.args.log_dir,self.information))\n\n\n def save_model(self,epoch):\n save_path=f'./save/{self.information}'\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n torch.save(self.G.state_dict(), f'{save_path}/generator_{epoch}epochs.pth')\n torch.save(self.D.state_dict(), f'{save_path}/discriminator_{epoch}epochs.pth')\n self.save_args(save_path)\n print(f'Models save to {save_path}/generator_{epoch}epochs.pth & {save_path}/discriminator_{epoch}epochs.pth 
')\n\n def save_args(self,save_path):\n argsDict = self.args.__dict__\n with open(f'{save_path}/setting.txt', 'w') as f:\n f.writelines('------------------ start ------------------' + '\\n')\n for eachArg, value in argsDict.items():\n f.writelines(eachArg + ' : ' + str(value) + '\\n')\n f.writelines('------------------- end -------------------')\n\n\n def train(self,train_loader,device):\n \"\"\"\n Training function for the GAN model.\n\n Args:\n train_loader (DataLoader): DataLoader for training data.\n device (torch.device): The device (CPU or GPU) to perform training.\n\n Returns:\n None\n \"\"\"\n\n # Move the model and loss to the specified device\n self.G.to(device)\n self.D.to(device)\n self.bce_loss.to(device)\n generator_iter = 0\n descriminator_iter = 0\n \n # Training loop\n for epoch in range(self.args.num_epochs):\n self.t_begin = t.time()\n pbar=tqdm(enumerate(train_loader),total=len(train_loader),ncols=100)\n\n for i, (images, _) in pbar:\n if i == train_loader.dataset.__len__() // self.batch_size:\n break\n\n # Generate random noise and labels\n z = torch.randn((self.batch_size, self.z_dim))\n real_labels = torch.ones(self.batch_size)\n fake_labels = torch.zeros(self.batch_size)\n\n # Move data to the specified device\n images=images.to(device)\n z=z.to(device)\n real_labels=real_labels.to(device)\n fake_labels=fake_labels.to(device)\n\n # Train Discriminator\n real_output = self.D(images)\n #print('real_output:',real_output)\n fake_images = self.G(z)\n fake_output = self.D(fake_images)\n d_real_loss = self.bce_loss(real_output.flatten(), real_labels)\n d_fake_loss = self.bce_loss(fake_output.flatten(), fake_labels)\n #print('real_loss:',d_real_loss.item(),' fake_loss:',d_fake_loss.item())\n d_loss = d_real_loss + d_fake_loss\n self.D.zero_grad()\n d_loss.backward()\n self.writer.add_scalar('D_loss', d_loss.item(), descriminator_iter)\n self.optim_d.step()\n descriminator_iter+=1\n\n # Train Generator\n if i % self.args.des_iter == 0:\n #print(\"i:\",i)\n self.D.zero_grad()\n self.G.zero_grad()\n\n z = torch.randn((self.batch_size, self.z_dim))\n z = z.to(device)\n\n fake_images = self.G(z)\n fake_output = self.D(fake_images)\n fake_score = fake_output.squeeze().mean()\n #print('fake_output:',fake_output)\n g_loss = self.bce_loss(fake_output.flatten(), real_labels)\n g_loss.backward()\n pbar.set_postfix({'G_loss': g_loss.item(),'D_loss': d_loss.item(),'fake_socre':fake_score.item()})\n #print('g_loss:',g_loss.item())\n self.optim_g.step()\n self.writer.add_scalar('G_loss', g_loss.item(), generator_iter)\n generator_iter+=1\n \n # Save generated images\n if generator_iter % 500 == 0:\n \n if not os.path.exists(f'./training_result_{self.args.dataset}-{self.information}/'):\n os.makedirs(f'./training_result_{self.args.dataset}-{self.information}/')\n\n z = torch.randn((self.batch_size,self.args.z_dim))\n z=z.to(device)\n samples = self.G(z)\n samples = samples.mul(0.5).add(0.5)\n samples = samples.data.cpu()[:25]\n grid = torchvision.utils.make_grid(samples,nrow=5)\n torchvision.utils.save_image(grid, './training_result_{}/img_generatori_iter_{}.png'.format(self.args.dataset+'-'+self.information,str(generator_iter).zfill(3)))\n \n\n # Print and log training information\n print(self.optim_d.state_dict()['param_groups'][0]['lr'])\n print(self.optim_g.state_dict()['param_groups'][0]['lr'])\n self.t_end = t.time()\n print(\n \"[Epoch %d/%d] [D loss: %f] [G loss: %f] [training time: %.3fseconds]\"\n % (epoch, self.args.num_epochs, d_loss.item(), g_loss.item() , (self.t_end - 
self.t_begin))\n )\n\n # Save the trained parameters\n if epoch % (self.args.num_epochs // 5) == 0 and epoch !=0:\n self.save_model(epoch)" }, { "identifier": "WGAN_CP", "path": "models/WGAN.py", "snippet": "class WGAN_CP(nn.Module):\n def __init__(self, args):\n super(WGAN_CP, self).__init__()\n self.G=WGANCP_G(args.hw,args.z_dim,args.in_channels)\n self.D=WGANCP_D(args.hw,args.in_channels)\n # self.G.weight_init()\n # self.D.weight_init()\n\n self.args=args\n self.batch_size=args.batch_size\n self.z_dim=args.z_dim\n\n # Attention!!! WGAN use RMSprop optimizer instead of Adam\n self.optim_g = optim.RMSprop(self.G.parameters(), lr=args.lr_g)\n self.optim_d = optim.RMSprop(self.D.parameters(), lr=args.lr_d)\n self.scheduler_optim_g=optim.lr_scheduler.MultiStepLR(self.optim_g, milestones=[100,150], gamma=0.9)\n self.scheduler_optim_d=optim.lr_scheduler.LambdaLR(self.optim_d, lr_lambda=self.warm_up)\n \n # Recording program start time for log directory naming\n program_begin_time = t.strftime('%Y-%m-%d %H:%M', t.localtime())\n # Logging information\n self.information=f'WGAN-{program_begin_time}'\n # TensorBoard SummaryWriter for logging\n self.writer=SummaryWriter(os.path.join(self.args.log_dir,self.information))\n\n def warm_up(self,epoch):\n \"\"\"\n Learning rate warm-up function for the RMSprop optimizer.\n\n Args:\n epoch (int): Current epoch number.\n\n Returns:\n float: Adjusted learning rate based on the warm-up strategy.\n \"\"\"\n top_epoch = int(self.args.num_epochs*0.3)\n if epoch<top_epoch:\n #In the first 30% of epochs, slowly increase the LR to the preset LR\n return (epoch+1) / top_epoch\n else:\n #Drop the LR to half of the preset\n return (1 -( 0.5 / (self.args.num_epochs - top_epoch) * (epoch - top_epoch) ) )\n\n def save_model(self,epoch):\n save_path=f'./save/{self.information}'\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n torch.save(self.G.state_dict(), f'{save_path}/generator_{epoch}epochs.pth')\n torch.save(self.D.state_dict(), f'{save_path}/discriminator_{epoch}epochs.pth')\n self.save_args(save_path)\n print(f'Models save to {save_path}/generator_{epoch}epochs.pth & {save_path}/discriminator_{epoch}epochs.pth ')\n\n def save_args(self,save_path):\n argsDict = self.args.__dict__\n with open(f'{save_path}/setting.txt', 'w') as f:\n f.writelines('------------------ start ------------------' + '\\n')\n for eachArg, value in argsDict.items():\n f.writelines(eachArg + ' : ' + str(value) + '\\n')\n f.writelines('------------------- end -------------------')\n\n\n def train(self,train_loader,device):\n \"\"\"\n Training function for the WGAN model.\n\n Args:\n train_loader (DataLoader): DataLoader for training data.\n device (torch.device): The device (CPU or GPU) to perform training.\n\n Returns:\n None\n \"\"\"\n\n # Move the model and loss to the specified device\n self.G.to(device)\n self.D.to(device)\n generator_iter = 0\n descriminator_iter = 0\n \n # Training loop\n for epoch in range(self.args.num_epochs):\n self.t_begin = t.time()\n pbar=tqdm(enumerate(train_loader),total=len(train_loader),ncols=100)\n\n for i, (images, _) in pbar:\n if i == train_loader.dataset.__len__() // self.batch_size:\n break\n\n # Generate random noise and labels\n z = torch.randn((self.batch_size, self.z_dim, 1, 1))\n real_labels = torch.ones(self.batch_size)\n fake_labels = torch.zeros(self.batch_size)\n\n # Move data to the specified device\n images=images.to(device)\n z=z.to(device)\n real_labels=real_labels.to(device)\n fake_labels=fake_labels.to(device)\n\n # Train 
Discriminator\n for p in self.D.parameters():\n p.data.clamp_(-self.args.wc, self.args.wc)\n\n\n d_loss_real = self.D(images)\n d_loss_real = d_loss_real.mean(0).view(1)\n\n fake_images = self.G(z)\n d_loss_fake = self.D(fake_images)\n d_loss_fake = d_loss_fake.mean(0).view(1)\n\n d_loss = d_loss_fake - d_loss_real\n Wasserstein_D = d_loss_real - d_loss_fake\n self.D.zero_grad()\n d_loss.backward()\n self.writer.add_scalar('D_loss', d_loss.item(), descriminator_iter)\n self.writer.add_scalar('Wasserstein_D', Wasserstein_D.item(), descriminator_iter)\n self.optim_d.step()\n descriminator_iter+=1\n\n # Train Generator\n if i % self.args.des_iter == 0:\n #print(\"i:\",i)\n self.D.zero_grad()\n self.G.zero_grad()\n\n z = torch.randn((self.batch_size, self.z_dim, 1, 1))\n z = z.to(device)\n\n fake_images = self.G(z)\n \n #print('fake_output:',fake_output)\n g_loss = self.D(fake_images)\n g_loss = g_loss.mean(0).view(1).mul(-1)\n g_loss.backward()\n pbar.set_postfix({'G_loss': g_loss.item(),'D_loss': d_loss.item()})\n #print('g_loss:',g_loss.item())\n self.optim_g.step()\n self.writer.add_scalar('G_loss', g_loss.item(), generator_iter)\n generator_iter+=1\n \n # Save generated images\n if generator_iter % 500 == 0:\n \n if not os.path.exists(f'./training_result_{self.args.dataset}-{self.information}/'):\n os.makedirs(f'./training_result_{self.args.dataset}-{self.information}/')\n\n z = torch.randn((self.batch_size,self.args.z_dim, 1, 1))\n z=z.to(device)\n samples = self.G(z)\n samples = samples.mul(0.5).add(0.5)\n samples = samples.data.cpu()[:25]\n grid = torchvision.utils.make_grid(samples,nrow=5)\n torchvision.utils.save_image(grid, './training_result_{}/img_generatori_iter_{}.png'.format(self.args.dataset+'-'+self.information,str(generator_iter).zfill(3)))\n \n # Learning rate scheduling\n self.scheduler_optim_d.step()\n self.scheduler_optim_g.step()\n\n # Print and log training information\n print(self.optim_d.state_dict()['param_groups'][0]['lr'])\n print(self.optim_g.state_dict()['param_groups'][0]['lr'])\n self.t_end = t.time()\n print(\n \"[Epoch %d/%d] [D loss: %f] [G loss: %f] [training time: %.3fseconds]\"\n % (epoch, self.args.num_epochs, d_loss.item(), g_loss.item() , (self.t_end - self.t_begin))\n )\n\n # Save the trained parameters\n if epoch % (self.args.num_epochs // 5) == 0 and epoch !=0:\n self.save_model(epoch)" }, { "identifier": "WGAN_GP", "path": "models/WGAN_GP.py", "snippet": "class WGAN_GP(nn.Module):\n def __init__(self, args):\n super(WGAN_GP, self).__init__()\n self.G=WGANGP_G(args.hw,args.z_dim,args.in_channels)\n self.D=WGANGP_D(args.hw,args.in_channels)\n # self.G.weight_init()\n # self.D.weight_init()\n\n self.args=args\n self.batch_size=args.batch_size\n self.z_dim=args.z_dim\n self.gp_lambda=args.gp_lambda\n\n \n self.optim_g = optim.Adam(self.G.parameters(), lr=args.lr_g, betas=args.betas)\n self.optim_d = optim.Adam(self.D.parameters(), lr=args.lr_d, betas=args.betas)\n self.scheduler_optim_g=optim.lr_scheduler.MultiStepLR(self.optim_g, milestones=[100,150], gamma=0.95)\n self.scheduler_optim_d=optim.lr_scheduler.LambdaLR(self.optim_d, lr_lambda=self.warm_up)\n \n # Recording program start time for log directory naming\n program_begin_time = t.strftime('%Y-%m-%d %H:%M', t.localtime())\n # Logging information\n self.information=f'WGAN_GP-{program_begin_time}'\n # TensorBoard SummaryWriter for logging\n self.writer=SummaryWriter(os.path.join(self.args.log_dir,self.information))\n\n def warm_up(self,epoch):\n \"\"\"\n Learning rate warm-up function for the 
Adam optimizer.\n\n Args:\n epoch (int): Current epoch number.\n\n Returns:\n float: Adjusted learning rate based on the warm-up strategy.\n \"\"\"\n top_epoch = int(self.args.num_epochs*0.3)\n if epoch<top_epoch:\n #In the first 30% of epochs, slowly increase the LR to the preset LR\n return (epoch+1) / top_epoch\n else:\n #Drop the LR to half of the preset\n return (1 -( 0.5 / (self.args.num_epochs - top_epoch) * (epoch - top_epoch) ) )\n\n def save_model(self,epoch):\n save_path=f'./save/{self.information}'\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n torch.save(self.G.state_dict(), f'{save_path}/generator_{epoch}epochs.pth')\n torch.save(self.D.state_dict(), f'{save_path}/discriminator_{epoch}epochs.pth')\n self.save_args(save_path)\n print(f'Models save to {save_path}/generator_{epoch}epochs.pth & {save_path}/discriminator_{epoch}epochs.pth ')\n\n def save_args(self,save_path):\n argsDict = self.args.__dict__\n with open(f'{save_path}/setting.txt', 'w') as f:\n f.writelines('------------------ start ------------------' + '\\n')\n for eachArg, value in argsDict.items():\n f.writelines(eachArg + ' : ' + str(value) + '\\n')\n f.writelines('------------------- end -------------------')\n\n\n def train(self,train_loader,device):\n \"\"\"\n Training function for the WGAN-GP model.\n\n Args:\n train_loader (DataLoader): DataLoader for training data.\n device (torch.device): The device (CPU or GPU) to perform training.\n\n Returns:\n None\n \"\"\"\n\n # Move the model and loss to the specified device\n self.G.to(device)\n self.D.to(device)\n generator_iter = 0\n descriminator_iter = 0\n \n # Training loop\n for epoch in range(self.args.num_epochs):\n self.t_begin = t.time()\n pbar=tqdm(enumerate(train_loader),total=len(train_loader),ncols=100)\n\n for i, (images, _) in pbar:\n if i == train_loader.dataset.__len__() // self.batch_size:\n break\n for p in self.D.parameters():\n p.requires_grad = True\n # Generate random noise and labels\n z = torch.randn((self.batch_size, self.z_dim, 1, 1))\n real_labels = torch.ones(self.batch_size)\n fake_labels = torch.zeros(self.batch_size)\n\n # Move data to the specified device\n images=images.to(device)\n z=z.to(device)\n real_labels=real_labels.to(device)\n fake_labels=fake_labels.to(device)\n\n # Train Discriminator\n d_loss_real = self.D(images)\n d_loss_real = d_loss_real.mean(0).view(1)\n\n fake_images = self.G(z)\n d_loss_fake = self.D(fake_images)\n d_loss_fake = d_loss_fake.mean(0).view(1)\n\n gradient_penalty = self.calculate_gradient_penalty(images.data, fake_images.data,device)\n\n d_loss = d_loss_fake - d_loss_real + gradient_penalty\n Wasserstein_D = d_loss_real - d_loss_fake\n self.D.zero_grad()\n d_loss.backward()\n self.writer.add_scalar('D_loss', d_loss.item(), descriminator_iter)\n self.writer.add_scalar('Wasserstein_D', Wasserstein_D.item(), descriminator_iter)\n self.optim_d.step()\n descriminator_iter+=1\n\n # Train Generator\n if i % self.args.des_iter == 0:\n \n for p in self.D.parameters():\n p.requires_grad = False # to avoid computation\n\n #print(\"i:\",i)\n self.D.zero_grad()\n self.G.zero_grad()\n\n z = torch.randn((self.batch_size, self.z_dim, 1, 1))\n z = z.to(device)\n\n fake_images = self.G(z)\n \n #print('fake_output:',fake_output)\n g_loss = self.D(fake_images)\n g_loss = g_loss.mean(0).view(1).mul(-1)\n g_loss.backward()\n pbar.set_postfix({'G_loss': g_loss.item(),'D_loss': d_loss.item()})\n #print('g_loss:',g_loss.item())\n self.optim_g.step()\n self.writer.add_scalar('G_loss', g_loss.item(), 
generator_iter)\n generator_iter+=1\n \n # Save generated images\n if generator_iter % 500 == 0:\n \n if not os.path.exists(f'./training_result_{self.args.dataset}-{self.information}/'):\n os.makedirs(f'./training_result_{self.args.dataset}-{self.information}/')\n\n z = torch.randn((self.batch_size,self.args.z_dim, 1, 1))\n z=z.to(device)\n samples = self.G(z)\n samples = samples.mul(0.5).add(0.5)\n samples = samples.data.cpu()[:25]\n grid = torchvision.utils.make_grid(samples,nrow=5)\n torchvision.utils.save_image(grid, './training_result_{}/img_generatori_iter_{}.png'.format(self.args.dataset+'-'+self.information,str(generator_iter).zfill(3)))\n \n # Learning rate scheduling\n self.scheduler_optim_d.step()\n self.scheduler_optim_g.step()\n\n # Print and log training information\n print(self.optim_d.state_dict()['param_groups'][0]['lr'])\n print(self.optim_g.state_dict()['param_groups'][0]['lr'])\n self.t_end = t.time()\n print(\n \"[Epoch %d/%d] [D loss: %f] [G loss: %f] [training time: %.3fseconds]\"\n % (epoch, self.args.num_epochs, d_loss.item(), g_loss.item() , (self.t_end - self.t_begin))\n )\n\n # Save the trained parameters\n if epoch % (self.args.num_epochs // 5) == 0 and epoch !=0:\n self.save_model(epoch)\n\n\n def calculate_gradient_penalty(self, real_images, fake_images, device):\n eta = torch.FloatTensor(self.batch_size,1,1,1).uniform_(0,1)\n eta = eta.expand(self.batch_size, real_images.size(1), real_images.size(2), real_images.size(3))\n eta=eta.to(device)\n\n interpolated = eta * real_images + ((1 - eta) * fake_images)\n interpolated.requires_grad_(True)\n\n # calculate probability of interpolated examples\n prob_interpolated = self.D(interpolated)\n\n grad_outputs=torch.ones(prob_interpolated.size()).to(device)\n grad_outputs.requires_grad_(True)\n # calculate gradients of probabilities with respect to examples\n gradients = autograd.grad(outputs=prob_interpolated, inputs=interpolated,\n grad_outputs=grad_outputs,\n create_graph=True, retain_graph=True)[0]\n\n grad_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * self.gp_lambda\n return grad_penalty" } ]
import torch from models.DCGAN import DCGAN from models.GAN import GAN from models.WGAN import WGAN_CP from models.WGAN_GP import WGAN_GP
7,182
def get_model(args): if args.model == 'DCGAN': net=DCGAN(args) elif args.model == 'GAN':
def get_model(args): if args.model == 'DCGAN': net=DCGAN(args) elif args.model == 'GAN':
net=GAN(args)
1
2023-12-12 06:24:31+00:00
8k
anyquest/pyaq
aq/jobs/scheduler.py
[ { "identifier": "JobManager", "path": "aq/jobs/manager.py", "snippet": "class JobManager:\n app_jobs: Dict[str, AppJob]\n activity_jobs: Dict[str, Dict[str, List[ActivityJob]]]\n\n def __init__(self):\n self.app_jobs = {}\n self.activity_jobs = {}\n self._logger = logging.getLogger(self.__class__.__name__)\n\n def create_app_job(self, app: App) -> AppJob:\n app_job = AppJob(app)\n self.app_jobs[app_job.id] = app_job\n self.activity_jobs[app_job.id] = {}\n return app_job\n\n def create_activity_job(self, app_job: AppJob, activity_name: str) -> ActivityJob:\n app = app_job.app\n if activity_name not in app.activities:\n raise AppJobError(f\"Activity {activity_name} not found\")\n activity_job = ActivityJob(activity_name, app_job)\n self.activity_jobs[app_job.id].setdefault(activity_name, []).append(activity_job)\n return activity_job\n\n def get_next_activities(self, activity_job: ActivityJob) -> List[str]:\n app = activity_job.app_job.app\n rv = []\n for activity_name in app.activities:\n activity = app.activities[activity_name]\n for activity_input in activity.inputs or []:\n if (activity_input.activity == activity_job.activity_name and\n not self.is_waiting_for_jobs(activity_job.app_job, activity)):\n rv.append(activity_name)\n return rv\n\n def is_waiting_for_jobs(self, app_job: AppJob, activity: Activity):\n for activity_input in activity.inputs or []:\n jobs = self.activity_jobs[app_job.id].get(activity_input.activity, None)\n if not jobs or any(not job.finished for job in jobs):\n return True\n return False\n\n def get_inputs_for_activity(self, app_job: AppJob, activity: Activity) -> List[Dict[str, Any]]:\n inputs_for_activity = {}\n if activity.inputs:\n for activity_input in activity.inputs:\n job_outputs = [job.output for job in self.activity_jobs[app_job.id].get(activity_input.activity, [])]\n inputs_for_activity[activity_input.activity] = job_outputs if len(job_outputs) > 1 else job_outputs[0]\n\n for activity_input in activity.inputs:\n if activity_input.map:\n try:\n expr = parse(activity_input.map)\n val = inputs_for_activity[activity_input.activity]\n inputs = []\n for match in expr.find(json.loads(val)):\n if isinstance(match.value, list):\n for input_value in match.value:\n inputs.append({**inputs_for_activity, activity_input.activity: input_value})\n return inputs\n except Exception as e:\n self._logger.error(f\"Failed to parse a map expression {e}\")\n return []\n\n return [inputs_for_activity]\n\n def get_outputs(self, app_job: AppJob) -> Dict[str, Any]:\n app = app_job.app\n\n # Terminal activities have inputs and do not have any other activities that take their outputs\n terminal_activities = [\n activity_name\n for activity_name, activity in app.activities.items()\n if activity.inputs and not any(\n activity_input.activity == activity_name\n for some_other_activity in app.activities.values()\n for activity_input in some_other_activity.inputs or []\n )\n ]\n\n # Collect outputs from terminal activities\n outputs = {\n activity_name: [job.output for job in self.activity_jobs[app_job.id].get(activity_name, [])]\n for activity_name in terminal_activities\n }\n\n # Remove empty values and return\n return {key: value for key, value in outputs.items() if value}" }, { "identifier": "AppJobError", "path": "aq/jobs/manager.py", "snippet": "class AppJobError(Exception):\n pass" }, { "identifier": "ReadActivity", "path": "aq/activities/read.py", "snippet": "class ReadActivity(BaseActivity):\n def __init__(self, pdf_reader: PdfReader, file_reader: FileReader, image_reader: 
ImageReader):\n self._logger = logging.getLogger(self.__class__.__name__)\n self._handlers = {\n \"application/pdf\": pdf_reader,\n \"application/json\": file_reader,\n \"text/plain\": file_reader,\n \"text/markdown\": file_reader,\n \"image/jpeg\": image_reader,\n \"image/jpg\": image_reader,\n \"image/png\": image_reader\n }\n\n async def perform(self, activity_job: ActivityJob, inputs: Dict[str, Any]) -> None:\n try:\n file_path = inputs.get(\"file_path\")\n if not file_path or not os.path.exists(file_path):\n raise ActivityError(f\"Invalid file path: {file_path}\")\n\n content_type = mimetypes.guess_type(file_path)[0]\n handler = self._handlers.get(content_type)\n if handler:\n content = await handler.read(file_path)\n else:\n raise ActivityError(f\"Cannot read content of type: {content_type}\")\n\n # Add the file path to the app context\n app_job = activity_job.app_job\n app_job.context[\"file_path\"] = file_path\n\n activity_job.state = JobState.SUCCESS\n activity_job.output = content\n activity_job.output_type = \"text/plain\"\n\n except Exception as e:\n activity_job.state = JobState.ERROR\n activity_job.output = str(e)\n self._logger.error(f\"Encountered an error {e}\")" }, { "identifier": "WriteActivity", "path": "aq/activities/write.py", "snippet": "class WriteActivity(BaseActivity):\n HTML_TEMPLATE = \"\"\"<html>\n<head>\n <meta http-equiv=\"content-type\" content=\"text/html; charset=UTF-8\">\n <style>\n .content {\n width: 80%%;\n margin: auto;\n line-height: 1.4rem;\n font-family: Helvetica, Arial, sans-serif;\n font-size: 0.9rem;\n }\n </style>\n</head>\n<body>\n <div class=\"content\">\n %s\n </div>\n</body>\n</html>\"\"\"\n\n def __init__(self):\n self._logger = logging.getLogger(self.__class__.__name__)\n\n async def perform(self, activity_job: ActivityJob, inputs: Dict[str, Any]) -> None:\n try:\n app = activity_job.app_job.app\n activity = app.activities[activity_job.activity_name]\n\n # Collate the inputs\n output_format = activity.parameters.get(\"format\", \"md\")\n template = activity.parameters.get(\"template\", None)\n if template:\n text = self.render(template, inputs)\n elif output_format == \"json\":\n text = self.merge_inputs_json(inputs, indent=2)\n else:\n text = self.merge_inputs(inputs)\n\n # Compute the file prefix based on the original file name\n original_file_path = activity_job.app_job.context.get(\"file_path\", None)\n if original_file_path:\n base_name = os.path.basename(original_file_path)\n file_prefix, _ = os.path.splitext(base_name)\n else:\n file_prefix = \"out\"\n\n # Apply formatting\n if output_format == \"html\":\n text = self.HTML_TEMPLATE % markdown.markdown(text, tab_length=2)\n\n # Write content to file\n file_name = self.generate_temp_filename(file_prefix, output_format)\n\n # Create the out directory\n path = \"./out\"\n if not os.path.exists(path):\n os.makedirs(path)\n\n file_path = os.path.join(path, file_name)\n async with aiofiles.open(file_path, mode='w', encoding='utf-8') as file:\n await file.write(text)\n\n activity_job.state = JobState.SUCCESS\n activity_job.output = file_path\n\n except Exception as e:\n self._logger.error(e)\n activity_job.state = JobState.ERROR\n activity_job.output = str(e)" }, { "identifier": "GenerateActivity", "path": "aq/activities/generate.py", "snippet": "class GenerateActivity(BaseActivity):\n TOOL_NAME_DELIMITER = \"__\"\n\n def __init__(self, provider_manager: ProviderManager, tool_manager: ToolManager):\n self._logger = logging.getLogger(self.__class__.__name__)\n self._provider_manager = 
provider_manager\n self._tool_manager = tool_manager\n\n async def perform(self, activity_job: ActivityJob, inputs: Dict[str, Any]) -> None:\n try:\n app = activity_job.app_job.app\n activity = app.activities[activity_job.activity_name]\n\n if len(activity.models) < 1:\n raise ActivityError(f\"A model is required\")\n model = app.models[activity.models[0]]\n\n temperature = float(activity.parameters.get(\"temperature\", model.parameters.get(\"temperature\", 0.5)))\n max_tokens = int(activity.parameters.get(\"max_words\", model.parameters.get(\"max_words\", 500))*4/3)\n\n messages = []\n profile = app.info.profile\n if profile:\n messages.append(ChatCompletionMessage(role=\"system\", content=profile))\n\n json_format = activity.parameters.get(\"format\", None) == \"json\"\n if json_format:\n messages.append(ChatCompletionMessage(\n role=\"system\",\n content=\"Provide your response as a JSON object.\"))\n else:\n messages.append(ChatCompletionMessage(\n role=\"system\",\n content=\"Use the tab length of two spaces when formatting nested lists in markdown.\"))\n\n tools = await self.get_tools(app, activity)\n if tools:\n messages.append(ChatCompletionMessage(\n role=\"system\",\n content=\"Think step-by-step. Perform as many iterations as necessary \"\n \"to accomplish your goal using the tools provided.\"))\n\n prompt_template = activity.parameters[\"prompt\"]\n prompt = self.render_prompt(prompt_template, inputs)\n messages.append(ChatCompletionMessage(role=\"user\", content=prompt))\n\n parts = []\n start_time = time.perf_counter()\n provider = self._provider_manager.get_provider(model.provider)\n for x in range(self.MAX_ITERATIONS):\n request = ChatCompletionRequest(\n model=model.model,\n messages=messages,\n temperature=temperature,\n max_tokens=max_tokens,\n tools=tools if tools else None,\n tool_choice=\"auto\" if tools else None,\n response_format=ResponseFormat(type=\"json_object\") if json_format else None\n )\n\n response = await provider.create_completion(request)\n\n choice: Choice = response.choices[0]\n message: ChatCompletionMessage = choice.message\n messages.append(message)\n\n if choice.finish_reason == \"tool_calls\":\n for tool_call in message.tool_calls:\n tool_result = await self.process_tool_call(tool_call, app)\n messages.append(tool_result)\n else:\n if message.content:\n parts.append(message.content)\n if choice.finish_reason:\n self._logger.debug(f\"Finished with reason {choice.finish_reason} \"\n f\"in {int(time.perf_counter()-start_time)} sec.\")\n break\n\n activity_job.state = JobState.SUCCESS\n activity_job.output = \"\\n\\n\".join(parts)\n activity_job.output_type = \"text/markdown\"\n\n except Exception as e:\n self._logger.error(e)\n activity_job.state = JobState.ERROR\n activity_job.output = str(e)\n\n async def get_tools(self, app: App, activity: Activity) -> List[Tool]:\n tools = []\n if activity.tools:\n for tool_name in activity.tools:\n tool_def = app.tools[tool_name]\n tool_obj = self._tool_manager.get_tool(tool_def.type)\n metadata = await tool_obj.get_metadata(tool_def)\n for tool in metadata:\n func = tool.function\n func.name = f\"{tool_name}{self.TOOL_NAME_DELIMITER}{func.name}\"\n tools.append(tool)\n return tools\n\n async def process_tool_call(self, tool_call: ToolCall, app: App) -> ChatCompletionMessage:\n self._logger.debug(f\"Calling {tool_call.function.name}\")\n\n names = tool_call.function.name.split(self.TOOL_NAME_DELIMITER)\n if len(names) < 2:\n raise ActivityError(f\"Invalid tool name {tool_call.function.name}\")\n\n if names[0] 
not in app.tools:\n raise ActivityError(f\"{names[0]} is not a valid tool name\")\n\n tool_def = app.tools[names[0]]\n tool_obj = self._tool_manager.get_tool(tool_def.type)\n\n arguments = json.loads(tool_call.function.arguments)\n response = await tool_obj.invoke(names[1], arguments, tool_def)\n\n return ChatCompletionMessage(\n role=\"tool\",\n tool_call_id=tool_call.id,\n name=tool_call.function.name,\n content=response\n )" }, { "identifier": "SummarizeActivity", "path": "aq/activities/summarize.py", "snippet": "class SummarizeActivity(BaseActivity):\n def __init__(self, provider_manager: ProviderManager):\n self._logger = logging.getLogger(self.__class__.__name__)\n self._provider_manager = provider_manager\n\n async def perform(self, activity_job: ActivityJob, inputs: Dict[str, Any]) -> None:\n try:\n app = activity_job.app_job.app\n activity = app.activities[activity_job.activity_name]\n\n # Get the text for summarization\n text = self.merge_inputs(inputs)\n if not text:\n raise ActivityError(f\"Text is required\")\n\n # Get the model\n if len(activity.models) < 1:\n raise ActivityError(f\"A model is required\")\n model = app.models[activity.models[0]]\n\n # Get the model parameters\n sentences = int(activity.parameters.get(\"sentences\", model.parameters.get(\"sentences\", 10)))\n temperature = float(activity.parameters.get(\"temperature\", model.parameters.get(\"temperature\", 0.5)))\n\n summary = await self.summarize(app.info.profile, text, model.provider,\n model.model, sentences, temperature)\n\n activity_job.output = summary\n activity_job.state = JobState.SUCCESS\n activity_job.output_type = \"text/markdown\"\n\n except Exception as e:\n activity_job.state = JobState.ERROR\n activity_job.output = str(e)\n self._logger.error(f\"Encountered an error {e}\")\n\n async def summarize(self, context: str, text: str,\n provider_type: ModelProvider, model: str,\n sentences: int, temperature: float) -> str:\n messages = []\n if context:\n messages.append(ChatCompletionMessage(role=\"system\", content=context))\n\n prompt = \"\"\"\n Summarize a block of text provided inside triple back ticks.\n Your summary must be readable. 
\n Your summary must include about %(sentences)d sentences.\n ```%(text)s```\n \"\"\"\n prompt = prompt % {\"text\": text, \"sentences\": sentences}\n messages.append(ChatCompletionMessage(role=\"user\", content=prompt))\n\n request = ChatCompletionRequest(\n model=model,\n messages=messages,\n temperature=temperature\n )\n provider = self._provider_manager.get_provider(provider_type)\n\n start_time = time.perf_counter()\n\n response = await provider.create_completion(request)\n choice: Choice = response.choices[0]\n message: ChatCompletionMessage = choice.message\n\n self._logger.debug(f\"Finished with reason {choice.finish_reason} \"\n f\"in {int(time.perf_counter()-start_time)} sec.\")\n\n return message.content" }, { "identifier": "ExtractActivity", "path": "aq/activities/extract.py", "snippet": "class ExtractActivity(BaseActivity):\n def __init__(self, provider_manager: ProviderManager):\n self._logger = logging.getLogger(self.__class__.__name__)\n self._provider_manager = provider_manager\n\n async def perform(self, activity_job: ActivityJob, inputs: Dict[str, Any]) -> None:\n try:\n app = activity_job.app_job.app\n activity = app.activities[activity_job.activity_name]\n\n if len(activity.models) < 1:\n raise ActivityError(\"A model is required\")\n model = app.models[activity.models[0]]\n\n schema = activity.parameters.get(\"schema\", None)\n if not schema:\n raise ActivityError(\"A schema is required\")\n if not isinstance(schema, list):\n raise ActivityError(\"A schema must be a list of types\")\n\n tools = []\n for schema_def in schema:\n func_def = Function(**schema_def)\n tools.append(Tool(function=func_def))\n\n messages = []\n profile = app.info.profile\n if profile:\n messages.append(ChatCompletionMessage(role=\"system\", content=profile))\n\n messages.append(ChatCompletionMessage(role=\"system\", content=\"\"\"\n Use the tools provided with information extracted from the user prompt.\n Use the entire prompt as the source of information. 
Do not exclude or omit anything.\n \"\"\"))\n messages.append(ChatCompletionMessage(role=\"user\", content=self.merge_inputs(inputs)))\n\n values: Dict[str, List[Any]] = {}\n provider = self._provider_manager.get_provider(model.provider)\n for x in range(self.MAX_ITERATIONS):\n request = ChatCompletionRequest(\n model=model.model,\n messages=messages,\n temperature=0.0,\n tools=tools,\n tool_choice=\"auto\"\n )\n response = await provider.create_completion(request)\n\n choice: Choice = response.choices[0]\n message: ChatCompletionMessage = choice.message\n messages.append(message)\n\n if choice.finish_reason == \"tool_calls\":\n for tool_call in message.tool_calls:\n function_call = tool_call.function\n values_for_type = values.get(function_call.name, None)\n if not values_for_type:\n values_for_type = []\n values[function_call.name] = values_for_type\n values_for_type.append(json.loads(function_call.arguments))\n messages.append(ChatCompletionMessage(\n role=\"tool\",\n tool_call_id=tool_call.id,\n name=function_call.name,\n content=\"Success\"\n ))\n elif choice.finish_reason:\n break\n\n activity_job.state = JobState.SUCCESS\n activity_job.output = json.dumps(values)\n activity_job.output_type = \"application/json\"\n\n except Exception as e:\n self._logger.error(e)\n activity_job.state = JobState.ERROR\n activity_job.output = str(e)" }, { "identifier": "StoreActivity", "path": "aq/activities/store.py", "snippet": "class StoreActivity(BaseActivity):\n def __init__(self, memory_manager: MemoryManager):\n self._logger = logging.getLogger(self.__class__.__name__)\n self._memory_manager = memory_manager\n\n async def perform(self, activity_job: ActivityJob, inputs: Dict[str, Any]) -> None:\n try:\n app = activity_job.app_job.app\n if not app.memory:\n raise ActivityError(\"A memory repository is required for this app\")\n\n activity = app.activities[activity_job.activity_name]\n if not activity.memory:\n raise ActivityError(\"A memory repository is required for this activity\")\n\n memory_def = app.memory[activity.memory[0]]\n memory_repository = self._memory_manager.get_repository(memory_def.type)\n\n file_id = activity_job.app_job.context.get(\"file_path\", str(uuid.uuid4()))\n chunk_size = activity.parameters.get(\"chunk_size\", memory_def.parameters.get(\"chunk_size\", 2000))\n\n text = self.merge_inputs(inputs)\n chunks = memory_repository.store(memory_def, app.info.id, file_id, text, chunk_size)\n\n activity_job.state = JobState.SUCCESS\n activity_job.output = str(chunks)\n activity_job.output_type = \"text/plain\"\n\n except Exception as e:\n activity_job.state = JobState.ERROR\n activity_job.output = str(e)\n self._logger.error(f\"Encountered an error {e}\")" }, { "identifier": "RetrieveActivity", "path": "aq/activities/retrieve.py", "snippet": "class RetrieveActivity(BaseActivity):\n def __init__(self, memory_manager: MemoryManager):\n self._logger = logging.getLogger(self.__class__.__name__)\n self._memory_manager = memory_manager\n\n async def perform(self, activity_job: ActivityJob, inputs: Dict[str, Any]) -> None:\n try:\n app = activity_job.app_job.app\n if not app.memory:\n raise ActivityError(\"A memory repository is required for this app\")\n\n activity = app.activities[activity_job.activity_name]\n if not activity.memory:\n raise ActivityError(\"A memory repository is required for this activity\")\n\n memory_def = app.memory[activity.memory[0]]\n memory_repository = self._memory_manager.get_repository(memory_def.type)\n\n n_results = activity.parameters.get(\"n_results\", 
3)\n\n chunks = memory_repository.retrieve(memory_def, app.info.id, self.merge_inputs(inputs), n_results)\n\n activity_job.state = JobState.SUCCESS\n activity_job.output = \"\\n\\n\".join(chunks)\n activity_job.output_type = \"text/plain\"\n\n except Exception as e:\n activity_job.state = JobState.ERROR\n activity_job.output = str(e)\n self._logger.error(f\"Encountered an error {e}\")" }, { "identifier": "FunctionActivity", "path": "aq/activities/function.py", "snippet": "class FunctionActivity(BaseActivity):\n def __init__(self):\n self._logger = logging.getLogger(self.__class__.__name__)\n\n async def perform(self, activity_job: ActivityJob, inputs: Dict[str, Any]) -> None:\n activity_job.state = JobState.SUCCESS\n activity_job.output = self.merge_inputs(inputs)\n activity_job.output_type = \"text/plain\"" }, { "identifier": "ReturnActivity", "path": "aq/activities/function.py", "snippet": "class ReturnActivity(BaseActivity):\n def __init__(self):\n self._logger = logging.getLogger(self.__class__.__name__)\n\n async def perform(self, activity_job: ActivityJob, inputs: Dict[str, Any]) -> None:\n try:\n activity_job.state = JobState.SUCCESS\n activity_job.output = self.merge_inputs_json(inputs)\n activity_job.output_type = \"application/json\"\n except Exception as e:\n activity_job.state = JobState.ERROR\n activity_job.output = str(e)\n self._logger.error(f\"Encountered an error {e}\")" }, { "identifier": "ActivityType", "path": "aq/types/app.py", "snippet": "class ActivityType(Enum):\n ANY = \"any\"\n READ = \"read\"\n WRITE = \"write\"\n STORE = \"store\"\n RETRIEVE = \"retrieve\"\n SUMMARIZE = \"summarize\"\n EXTRACT = \"extract\"\n GENERATE = \"generate\"\n FUNCTION = \"function\"\n CALL = \"call\"\n RETURN = \"return\"" }, { "identifier": "ActivityJob", "path": "aq/types/job.py", "snippet": "class ActivityJob:\n id: str\n activity_name: str\n app_job: AppJob\n state: JobState\n output: str\n output_type: str = \"text/plain\"\n\n def __init__(self, activity_name: str, app_job: AppJob):\n self.id = str(uuid.uuid4())\n self.activity_name = activity_name\n self.app_job = app_job\n self.state = JobState.CREATED\n self.output = \"\"\n\n @property\n def finished(self) -> bool:\n return self.state == JobState.SUCCESS or self.state == JobState.ERROR" }, { "identifier": "JobState", "path": "aq/types/job.py", "snippet": "class JobState(Enum):\n ANY = 0,\n CREATED = 1,\n RUNNING = 2,\n SUCCESS = 3\n ERROR = 4" } ]
import asyncio import logging import time from typing import Dict, Any from .manager import JobManager, AppJobError from ..activities import ( ReadActivity, WriteActivity, SummarizeActivity, GenerateActivity, ExtractActivity, StoreActivity, RetrieveActivity, FunctionActivity, ReturnActivity ) from ..types import ActivityType, ActivityJob, JobState
5,683
class WorkItem: job: ActivityJob inputs: Dict[str, Any] def __init__(self, job: ActivityJob, inputs: Dict[str, Any]) -> None: self.job = job self.inputs = inputs class JobScheduler: def __init__(self, config: Dict[str, Any], job_manager: JobManager, read_activity: ReadActivity, write_activity: WriteActivity, summarize_activity: SummarizeActivity, generate_activity: GenerateActivity, extract_activity: ExtractActivity, store_activity: StoreActivity, retrieve_activity: RetrieveActivity,
class WorkItem: job: ActivityJob inputs: Dict[str, Any] def __init__(self, job: ActivityJob, inputs: Dict[str, Any]) -> None: self.job = job self.inputs = inputs class JobScheduler: def __init__(self, config: Dict[str, Any], job_manager: JobManager, read_activity: ReadActivity, write_activity: WriteActivity, summarize_activity: SummarizeActivity, generate_activity: GenerateActivity, extract_activity: ExtractActivity, store_activity: StoreActivity, retrieve_activity: RetrieveActivity,
function_activity: FunctionActivity, return_activity: ReturnActivity):
9
2023-12-14 13:25:52+00:00
8k
multimodallearning/DG-TTA
dg_tta/run.py
[ { "identifier": "inject_dg_trainers_into_nnunet", "path": "dg_tta/__build__.py", "snippet": "def inject_dg_trainers_into_nnunet(num_epochs=1000):\n dg_trainer_paths = Path(pretraining.__file__).parent.glob(\"nnUNetTrainer*.py\")\n target_dir = Path(nnunetv2.__path__[0], \"training/nnUNetTrainer/variants/dg_tta/\")\n target_dir.mkdir(exist_ok=True)\n (target_dir / \"__init__.py\").touch() # Make directory a mdoule\n\n for tr in dg_trainer_paths:\n # open file\n with open(tr, \"r\") as f:\n tr_code = f.read()\n tr_code_with_set_epochs = re.sub(\n r\"self\\.num_epochs = \\d+\", f\"self.num_epochs = {num_epochs}\", tr_code\n )\n\n with open(target_dir / tr.name, \"w\") as f:\n f.write(tr_code_with_set_epochs)" }, { "identifier": "check_trainers_injected", "path": "dg_tta/__build__.py", "snippet": "def check_trainers_injected():\n target_dir = Path(nnunetv2.__path__[0], \"training/nnUNetTrainer/variants/dg_tta/\")\n assert (\n target_dir.exists()\n ), \"DG trainers not injected into nnUNet module. Please inject trainers first.\"" }, { "identifier": "check_dga_root_is_set", "path": "dg_tta/utils.py", "snippet": "def check_dga_root_is_set(soft_check=False):\n prompt = \"Please define an existing root directory for DG-TTA by setting DG_TTA_ROOT.\"\n check = Path(\n os.environ.get(\"DG_TTA_ROOT\", \"_\")\n ).is_dir()\n\n if soft_check and not check:\n print(prompt)\n return\n\n assert check, prompt" }, { "identifier": "generate_label_mapping", "path": "dg_tta/tta/torch_utils.py", "snippet": "def generate_label_mapping(source_label_dict, target_label_dict):\n assert all([isinstance(k, str) for k in source_label_dict.keys()])\n assert all([isinstance(k, str) for k in target_label_dict.keys()])\n assert set(source_label_dict.keys()).intersection(\n target_label_dict.keys()\n ), \"There are no intersecting label names in given dicts.\"\n mapped_label = []\n\n mapping_dict = dict.fromkeys(\n list(source_label_dict.keys()) + list(target_label_dict.keys())\n )\n\n for key in mapping_dict:\n if key in source_label_dict and key in target_label_dict:\n mapping_dict[key] = (source_label_dict[key], target_label_dict[key])\n\n return {k: v for k, v in mapping_dict.items() if v is not None}" }, { "identifier": "wandb_run", "path": "dg_tta/tta/config_log_utils.py", "snippet": "def wandb_run(wandb_project_name, tta_fn, **kwargs):\n config = kwargs[\"config\"]\n with wandb.init(\n project=wandb_project_name,\n name=kwargs[\"run_name\"],\n mode=config[\"wandb_mode\"],\n config=config,\n ):\n kwargs[\"config\"] = wandb.config\n tta_fn(**kwargs)\n wandb.finish()\n torch.cuda.empty_cache()" }, { "identifier": "load_current_modifier_functions", "path": "dg_tta/tta/config_log_utils.py", "snippet": "def load_current_modifier_functions(plan_dir):\n mod_path = Path(plan_dir / \"modifier_functions.py\")\n spec = importlib.util.spec_from_file_location(\n \"dg_tta.current_modifier_functions\", mod_path\n )\n dyn_mod = importlib.util.module_from_spec(spec)\n sys.modules[\"dg_tta.current_modifier_functions\"] = dyn_mod\n spec.loader.exec_module(dyn_mod)\n\n return dyn_mod" }, { "identifier": "get_tta_folders", "path": "dg_tta/tta/config_log_utils.py", "snippet": "def get_tta_folders(\n pretrained_dataset_id,\n tta_dataset_id,\n pretrainer,\n pretrainer_config,\n pretrainer_fold,\n):\n root_dir = Path(os.environ[\"DG_TTA_ROOT\"])\n\n # Get dataset names\n tta_dataset_name = maybe_convert_to_dataset_name(tta_dataset_id)\n\n if isinstance(pretrained_dataset_id, int):\n pretrained_dataset_name = 
maybe_convert_to_dataset_name(pretrained_dataset_id)\n else:\n pretrained_dataset_name = pretrained_dataset_id\n\n fold_folder = (\n f\"fold_{pretrainer_fold}\" if pretrainer_fold != \"all\" else pretrainer_fold\n )\n map_folder = f\"Pretrained_{pretrained_dataset_name}_at_{tta_dataset_name}\"\n pretrainer_folder = f\"{pretrainer}__{pretrainer_config}\"\n\n plan_dir = root_dir / \"plans\" / map_folder / pretrainer_folder / fold_folder\n results_dir = root_dir / \"results\" / map_folder / pretrainer_folder / fold_folder\n\n tta_data_dir = Path(nnUNet_raw, tta_dataset_name)\n\n return (\n tta_data_dir,\n plan_dir,\n results_dir,\n pretrained_dataset_name,\n tta_dataset_name,\n )" }, { "identifier": "wandb_run_is_available", "path": "dg_tta/tta/config_log_utils.py", "snippet": "def wandb_run_is_available():\n return (\n importlib.util.find_spec(\"wandb\") is not None\n and wandb.run is not None\n and not wandb.run.disabled\n )" }, { "identifier": "check_dataset_pretrain_config", "path": "dg_tta/tta/config_log_utils.py", "snippet": "def check_dataset_pretrain_config(\n pretrained_dataset_id, pretrainer, pretrainer_config, pretrainer_fold\n):\n pretrained_dataset_id = (\n int(pretrained_dataset_id)\n if pretrained_dataset_id.isnumeric()\n else pretrained_dataset_id\n )\n\n pretrainer_fold = (\n int(pretrainer_fold)\n if pretrainer_fold.isnumeric()\n else pretrainer_fold\n )\n\n assert pretrained_dataset_id in [\n \"TS104_GIN\",\n \"TS104_MIND\",\n \"TS104_GIN_MIND\",\n ] or isinstance(pretrained_dataset_id, int)\n\n if isinstance(pretrained_dataset_id, int):\n # Check fold specifier\n assert pretrainer is not None\n assert pretrainer_config is not None\n assert pretrainer_fold == \"all\" or isinstance(pretrainer_fold, int)\n else:\n if pretrained_dataset_id == \"TS104_GIN\":\n pretrainer = \"nnUNetTrainer_GIN\"\n pretrainer_config = \"3d_fullres\"\n pretrainer_fold = \"0\"\n\n elif pretrained_dataset_id == \"TS104_MIND\":\n pretrainer = \"nnUNetTrainer_MIND\"\n pretrainer_config = \"3d_fullres\"\n pretrainer_fold = \"0\"\n\n elif pretrained_dataset_id == \"TS104_GIN_MIND\":\n pretrainer = \"nnUNetTrainer_GIN_MIND\"\n pretrainer_config = \"3d_fullres\"\n pretrainer_fold = \"0\"\n\n else:\n raise ValueError()\n\n return pretrained_dataset_id, pretrainer, pretrainer_config, pretrainer_fold" }, { "identifier": "tta_main", "path": "dg_tta/tta/tta.py", "snippet": "def tta_main(\n run_name,\n config,\n tta_data_dir,\n save_base_path,\n label_mapping,\n modifier_fn_module,\n device,\n debug=False,\n):\n START_CLASS = 1 # Do not use background for consistency loss\n\n # Load model\n pretrained_weights_filepath = config[\"pretrained_weights_filepath\"]\n predictor, patch_size, network, parameters = load_network(\n pretrained_weights_filepath, device\n )\n\n # Load TTA data\n tta_across_all_samples = config[\"tta_across_all_samples\"]\n\n tqdm.write(\"\\n# Loading data\")\n tta_data, num_samples = load_tta_data(\n config, tta_data_dir, predictor, tta_across_all_samples\n )\n\n if tta_across_all_samples:\n # TTA data is a list\n inference_data = tta_data\n else:\n # TTA data is a generator, so we need to copy it for inference\n tta_data, inference_data = tee(tta_data)\n\n ensemble_count = config[\"ensemble_count\"]\n B = config[\"batch_size\"]\n patches_to_be_accumulated = config[\"patches_to_be_accumulated\"]\n tta_eval_patches = config[\"tta_eval_patches\"]\n num_epochs = config[\"epochs\"]\n start_tta_at_epoch = config[\"start_tta_at_epoch\"]\n\n optimized_labels = config[\"optimized_labels\"]\n\n 
save_path = Path(save_base_path) / run_name\n save_path.mkdir(exist_ok=True, parents=False)\n\n with open(save_path / \"tta_plan.json\", \"w\") as f:\n json.dump({k: v for k, v in config.items()}, f, indent=4)\n\n sitk_io = SimpleITKIO()\n\n identity_grid = F.affine_grid(\n torch.eye(4, device=device).repeat(B, 1, 1)[:, :3],\n [B, 1] + patch_size,\n align_corners=False,\n )\n\n if tta_across_all_samples:\n sample_range = [0]\n else:\n sample_range = trange(num_samples, desc=\"Samples\")\n\n disable_internal_augmentation()\n\n tqdm.write(\"\\n# Starting TTA\")\n for smp_idx in sample_range:\n _, tta_tens_list, sample_id, sample_extension, sub_dir_tta = get_sample_specs(\n config, smp_idx, tta_data, save_path, tta_across_all_samples\n )\n tqdm.write(f\"\\nSample {sample_id}\")\n\n sub_dir_tta.mkdir(exist_ok=True)\n\n for ensemble_idx in trange(ensemble_count, desc=\"Ensembles\"):\n tta_parameters_save_path = get_parameters_save_path(\n sub_dir_tta, sample_id, ensemble_idx\n )\n if tta_parameters_save_path.is_file():\n tqdm.write(\n f\"TTA parameters file already exists. Skipping '{tta_parameters_save_path}'\"\n )\n continue\n\n tta_losses = torch.zeros(num_epochs)\n eval_dices = torch.zeros(num_epochs)\n\n intensity_aug_func = INTENSITY_AUG_FUNCTION_DICT[\n config[\"intensity_aug_function\"]\n ]\n\n model = get_model_from_network(network, modifier_fn_module, parameters)\n model = model.to(device)\n\n optimizer = torch.optim.AdamW(model.parameters(), lr=config[\"lr\"])\n\n tbar = trange(num_epochs, desc=\"Epoch\")\n\n model.apply(fix_all)\n for epoch in tbar:\n model.train()\n global_idx = get_global_idx(\n [\n (smp_idx, num_samples),\n (ensemble_idx, ensemble_count),\n (epoch, num_epochs),\n ]\n )\n if wandb_run_is_available():\n wandb.log({\"ref_epoch_idx\": epoch}, global_idx)\n step_losses = []\n\n if epoch == start_tta_at_epoch:\n model.apply(fix_all)\n if config[\"params_with_grad\"] == \"all\":\n model.apply(release_all)\n elif config[\"params_with_grad\"] == \"norms\":\n model.apply(release_norms)\n elif config[\"params_with_grad\"] == \"encoder\":\n model.encoder.apply(release_all)\n else:\n raise ValueError()\n\n grad_params = {\n id(p): p.numel() for p in model.parameters() if p.requires_grad\n }\n tqdm.write(\n f\"Released #{sum(list(grad_params.values()))/1e6:.2f} million trainable params\"\n )\n\n for _ in range(patches_to_be_accumulated):\n with torch.no_grad():\n imgs, _ = get_batch(\n tta_tens_list,\n torch.randperm(len(tta_tens_list))[:B],\n patch_size,\n fixed_patch_idx=None,\n device=device,\n )\n\n imgs = torch.cat(imgs, dim=0)\n\n target_a = calc_branch(\n \"branch_a\",\n config,\n model,\n intensity_aug_func,\n identity_grid,\n patch_size,\n B,\n label_mapping,\n optimized_labels,\n modifier_fn_module,\n imgs,\n device,\n )\n target_b = calc_branch(\n \"branch_b\",\n config,\n model,\n intensity_aug_func,\n identity_grid,\n patch_size,\n B,\n label_mapping,\n optimized_labels,\n modifier_fn_module,\n imgs,\n device,\n )\n\n # Apply consistency loss\n common_content_mask = (\n target_a.sum(1, keepdim=True) > 0.0\n ).float() * (target_b.sum(1, keepdim=True) > 0.0).float()\n sm_a = target_a.softmax(1) * common_content_mask\n sm_b = target_b.softmax(1) * common_content_mask\n\n loss = 1 - soft_dice_loss(sm_a, sm_b)[:, START_CLASS:].mean()\n\n loss_accum = loss / patches_to_be_accumulated\n step_losses.append(loss.detach().cpu())\n\n if epoch >= start_tta_at_epoch:\n loss_accum.backward()\n\n if epoch >= start_tta_at_epoch:\n optimizer.step()\n optimizer.zero_grad()\n\n 
tta_losses[epoch] = torch.stack(step_losses).mean().item()\n\n with torch.inference_mode():\n model.eval()\n for _ in range(tta_eval_patches):\n imgs, labels = get_batch(\n tta_tens_list,\n torch.randperm(len(tta_tens_list))[:B],\n patch_size,\n fixed_patch_idx=\"center\", # This is just for evaluation purposes\n device=device,\n )\n\n imgs = torch.cat(imgs, dim=0)\n\n none_labels = [l is None for l in labels]\n filtered_imgs = imgs[~torch.as_tensor(none_labels)]\n filtered_labels = [\n l for flag, l in zip(none_labels, labels) if not flag\n ]\n\n if len(filtered_imgs) == 0:\n eval_dices[epoch] = float(\"nan\")\n continue\n\n else:\n filtered_labels = torch.cat(filtered_labels, dim=0)\n output_eval = model(filtered_imgs)\n if isinstance(output_eval, tuple):\n output_eval = output_eval[0]\n\n output_eval = map_label(\n output_eval,\n get_map_idxs(\n label_mapping,\n optimized_labels,\n input_type=\"pretrain_labels\",\n ),\n input_format=\"logits\",\n )\n target_argmax = output_eval.argmax(1)\n\n filtered_labels = map_label(\n filtered_labels,\n get_map_idxs(\n label_mapping,\n optimized_labels,\n input_type=\"tta_labels\",\n ),\n input_format=\"argmaxed\",\n ).long()\n d_tgt_val = dice_coeff(\n target_argmax, filtered_labels, len(optimized_labels)\n )\n\n eval_dices[epoch] += (\n 1 / tta_eval_patches * d_tgt_val.nanmean().item()\n )\n\n if debug:\n break\n\n tbar.set_description(\n f\"Epochs, loss={tta_losses[epoch]:.3f}, Pseudo-Dice={eval_dices[epoch]*100:.1f}%\"\n )\n if wandb_run_is_available():\n wandb.log(\n {\n f\"losses/loss__{sample_id}__ensemble_idx_{ensemble_idx}\": tta_losses[\n epoch\n ]\n },\n step=global_idx,\n )\n wandb.log(\n {\n f\"scores/eval_dice__{sample_id}__ensemble_idx_{ensemble_idx}\": eval_dices[\n epoch\n ]\n },\n step=global_idx,\n )\n\n tta_parameters = [model.state_dict()]\n torch.save(tta_parameters, tta_parameters_save_path)\n\n if not wandb_run_is_available() and num_epochs > 0:\n plot_run_results(\n sub_dir_tta, sample_id, ensemble_idx, tta_losses, eval_dices\n )\n\n if debug:\n break\n # End of ensemble loop\n\n print(\"\\n\\n# Starting inference\")\n all_prediction_save_paths = []\n\n for smp_idx in trange(num_samples, desc=\"Samples\"):\n ensemble_parameter_paths = []\n\n tta_sample, tta_tens_list, param_sample_id, sample_extension, sub_dir_tta = get_sample_specs(\n config, smp_idx, inference_data, save_path, across_all_samples=False\n )\n tqdm.write(f\"\\nSample {param_sample_id}\\n\")\n tta_sample[\"data\"] = get_imgs(tta_sample[\"data\"].unsqueeze(0)).squeeze(0)\n\n # Update internal save path for nnUNet\n ofile = tta_sample[\"ofile\"]\n new_ofile = str(save_path / ofile)\n tta_sample[\"ofile\"] = new_ofile\n\n prediction_save_path = Path(new_ofile + sample_extension)\n prediction_save_path.parent.mkdir(exist_ok=True)\n\n for ensemble_idx in range(config[\"ensemble_count\"]):\n ensemble_parameter_paths.append(\n get_parameters_save_path(sub_dir_tta, param_sample_id, ensemble_idx)\n )\n\n disable_internal_augmentation()\n model = get_model_from_network(network, modifier_fn_module)\n\n predicted_output_array = run_inference(tta_sample, model, predictor, ensemble_parameter_paths)\n data_properties = tta_sample['data_properties']\n\n predicted_output = map_label(\n torch.as_tensor(predicted_output_array),\n get_map_idxs(label_mapping, optimized_labels, input_type=\"pretrain_labels\"),\n input_format=\"argmaxed\",\n ).squeeze(0)\n\n sitk_io.write_seg(\n predicted_output.numpy(), prediction_save_path, properties=data_properties\n )\n 
all_prediction_save_paths.append(prediction_save_path)\n\n # End of sample loop\n\n tqdm.write(\"\\n\\nEvaluating predictions\")\n\n for pred_path in all_prediction_save_paths:\n pred_label_name = Path(pred_path).name\n if \"outputTs\" in Path(pred_path).parent.parts[-1]:\n path_mapped_target = save_path / \"mapped_target_labelsTs\" / pred_label_name\n path_orig_target = tta_data_dir / \"labelsTs\" / pred_label_name\n elif \"outputTr\" in Path(pred_path).parent.parts[-1]:\n path_mapped_target = save_path / \"mapped_target_labelsTr\" / pred_label_name\n path_orig_target = tta_data_dir / \"labelsTr\" / pred_label_name\n else:\n raise ValueError()\n\n if not path_orig_target.is_file():\n # No target available\n continue\n\n path_mapped_target.parent.mkdir(exist_ok=True)\n shutil.copy(path_orig_target, path_mapped_target)\n\n seg, sitk_stuff = sitk_io.read_seg(path_mapped_target)\n seg = torch.as_tensor(seg)\n mapped_seg = map_label(\n seg,\n get_map_idxs(label_mapping, optimized_labels, input_type=\"tta_labels\"),\n input_format=\"argmaxed\",\n ).squeeze(0)\n sitk_io.write_seg(mapped_seg.squeeze(0).numpy(), path_mapped_target, sitk_stuff)\n\n for bucket in [\"Ts\", \"Tr\"]:\n all_mapped_targets_path = save_path / f\"mapped_target_labels{bucket}\"\n all_pred_targets_path = save_path / f\"tta_output{bucket}\"\n\n if not all_mapped_targets_path.is_dir() or not all_pred_targets_path.is_dir():\n continue\n\n # Run postprocessing\n postprocess_results_fn = (\n modifier_fn_module.ModifierFunctions.postprocess_results_fn\n )\n postprocess_results_fn(all_pred_targets_path)\n\n summary_path = f\"{save_path}/summary_{bucket}.json\"\n compute_metrics_on_folder_simple(\n folder_ref=all_mapped_targets_path,\n folder_pred=all_pred_targets_path,\n labels=list(range(len(optimized_labels))),\n output_file=summary_path,\n num_processes=config[\"num_processes\"],\n chill=True,\n )\n\n with open(summary_path, \"r\") as f:\n summary_json = json.load(f)\n final_mean_dice = summary_json[\"foreground_mean\"][\"Dice\"]\n\n if wandb_run_is_available():\n wandb.log({f\"scores/tta_dice_mean_{bucket}\": final_mean_dice})" } ]
import sys import re import json import argparse import json import torch import torch.nn.functional as F import randomname import dg_tta from pathlib import Path from datetime import datetime from nnunetv2.run.run_training import run_training_entry as nnunet_run_training_main from dg_tta.__build__ import inject_dg_trainers_into_nnunet, check_trainers_injected from dg_tta.utils import check_dga_root_is_set from dg_tta.tta.torch_utils import generate_label_mapping from dg_tta.tta.config_log_utils import ( wandb_run, load_current_modifier_functions, get_tta_folders, wandb_run_is_available, check_dataset_pretrain_config, ) from dg_tta.tta.tta import tta_main
6,321
"--pretrainer", help="Trainer to use for pretraining", default=None, ) parser.add_argument( "--pretrainer_config", help="Fold ID of nnUNet model to use for pretraining", default="3d_fullres", ) parser.add_argument( "--pretrainer_fold", help="Fold ID of nnUNet model to use for pretraining", default="0", ) parser.add_argument( "--tta_dataset_bucket", help="""Can be one of ['imagesTr', 'imagesTs', 'imagesTrAndTs']""", default="imagesTs", ) args = parser.parse_args(sys.argv[2:]) pretrained_dataset_id, pretrainer, pretrainer_config, pretrainer_fold = check_dataset_pretrain_config( args.pretrained_dataset_id, args.pretrainer, args.pretrainer_config, args.pretrainer_fold ) dg_tta.tta.config_log_utils.prepare_tta( pretrained_dataset_id, int(args.tta_dataset_id), pretrainer=pretrainer, pretrainer_config=pretrainer_config, pretrainer_fold=pretrainer_fold, tta_dataset_bucket=args.tta_dataset_bucket, ) def run_tta(self): check_trainers_injected() parser = argparse.ArgumentParser(description="Run DG-TTA") parser.add_argument( "pretrained_dataset_id", help=""" Task ID for pretrained model. Can be numeric or one of ['TS104_GIN', 'TS104_MIND', 'TS104_GIN_MIND']""", ) parser.add_argument("tta_dataset_id", help="Task ID for TTA") parser.add_argument( "--pretrainer", help="Trainer to use for pretraining", default=None, ) parser.add_argument( "--pretrainer_config", help="Fold ID of nnUNet model to use for pretraining", default="3d_fullres", ) parser.add_argument( "--pretrainer_fold", help="Fold ID of nnUNet model to use for pretraining", default="0", ) parser.add_argument("--device", help="Device to be used", default="cuda") args = parser.parse_args(sys.argv[2:]) pretrained_dataset_id, pretrainer, pretrainer_config, pretrainer_fold = check_dataset_pretrain_config( args.pretrained_dataset_id, args.pretrainer, args.pretrainer_config, args.pretrainer_fold ) ( tta_data_dir, plan_dir, results_dir, pretrained_dataset_name, tta_dataset_name, ) = get_tta_folders( pretrained_dataset_id, int(args.tta_dataset_id), pretrainer, pretrainer_config, pretrainer_fold, ) now_str = datetime.now().strftime("%Y%m%d__%H_%M_%S") numbers = [ int(re.search(r"[0-9]+$", str(_path))[0]) for _path in results_dir.iterdir() ] if len(numbers) == 0: run_no = 0 else: run_no = torch.as_tensor(numbers).max().item() + 1 run_name = f"{now_str}_{randomname.get_name()}-{run_no}" with open(Path(plan_dir / "tta_plan.json"), "r") as f: config = json.load(f) with open( Path(plan_dir) / f"{pretrained_dataset_name}_label_mapping.json", "r" ) as f: pretrained_label_mapping = json.load(f) with open(Path(plan_dir) / f"{tta_dataset_name}_label_mapping.json", "r") as f: tta_dataset_label_mapping = json.load(f) label_mapping = generate_label_mapping( pretrained_label_mapping, tta_dataset_label_mapping ) modifier_fn_module = load_current_modifier_functions(plan_dir) device = torch.device(args.device) kwargs = dict( run_name=run_name, config=config, tta_data_dir=tta_data_dir, save_base_path=results_dir, label_mapping=label_mapping, modifier_fn_module=modifier_fn_module, device=device, )
PROJECT_NAME = "nnunet_tta" class DGTTAProgram: def __init__(self): parser = argparse.ArgumentParser( description="DG-TTA for nnUNetv2", usage="""dgtta <command> [<args>] Commands are: inject_trainers Inject DG trainers into nnUNet module pretrain Pretrain on a dataset with DG trainers prepare_tta Prepare test-time adaptation run_tta Run test-time adaptation """, ) parser.add_argument("command", help="Subcommand to run") args = parser.parse_args(sys.argv[1:2]) if not hasattr(self, args.command): print("Unrecognized command") parser.print_help() exit(1) getattr(self, args.command)() def inject_trainers(self): parser = argparse.ArgumentParser( description="Inject DG-TTA trainers into nnUNet module code" ) parser.add_argument( "--num_epochs", type=int, default=1000, help="Number of epochs to train" ) args = parser.parse_args(sys.argv[2:]) inject_dg_trainers_into_nnunet(args.num_epochs) def pretrain(self): check_trainers_injected() print("Dispatching into nnUNetv2_train.") sys.argv = sys.argv[2:] sys.argv.insert(0, "nnUNetv2_train") nnunet_run_training_main() def prepare_tta(self): check_trainers_injected() parser = argparse.ArgumentParser( description="Prepare DG-TTA", usage="""dgtta prepare_tta [-h]""" ) parser.add_argument( "pretrained_dataset_id", help=""" Task ID for pretrained model. Can be numeric or one of ['TS104_GIN', 'TS104_MIND', 'TS104_GIN_MIND']""", ) parser.add_argument("tta_dataset_id", help="Task ID for TTA") parser.add_argument( "--pretrainer", help="Trainer to use for pretraining", default=None, ) parser.add_argument( "--pretrainer_config", help="Fold ID of nnUNet model to use for pretraining", default="3d_fullres", ) parser.add_argument( "--pretrainer_fold", help="Fold ID of nnUNet model to use for pretraining", default="0", ) parser.add_argument( "--tta_dataset_bucket", help="""Can be one of ['imagesTr', 'imagesTs', 'imagesTrAndTs']""", default="imagesTs", ) args = parser.parse_args(sys.argv[2:]) pretrained_dataset_id, pretrainer, pretrainer_config, pretrainer_fold = check_dataset_pretrain_config( args.pretrained_dataset_id, args.pretrainer, args.pretrainer_config, args.pretrainer_fold ) dg_tta.tta.config_log_utils.prepare_tta( pretrained_dataset_id, int(args.tta_dataset_id), pretrainer=pretrainer, pretrainer_config=pretrainer_config, pretrainer_fold=pretrainer_fold, tta_dataset_bucket=args.tta_dataset_bucket, ) def run_tta(self): check_trainers_injected() parser = argparse.ArgumentParser(description="Run DG-TTA") parser.add_argument( "pretrained_dataset_id", help=""" Task ID for pretrained model. 
Can be numeric or one of ['TS104_GIN', 'TS104_MIND', 'TS104_GIN_MIND']""", ) parser.add_argument("tta_dataset_id", help="Task ID for TTA") parser.add_argument( "--pretrainer", help="Trainer to use for pretraining", default=None, ) parser.add_argument( "--pretrainer_config", help="Fold ID of nnUNet model to use for pretraining", default="3d_fullres", ) parser.add_argument( "--pretrainer_fold", help="Fold ID of nnUNet model to use for pretraining", default="0", ) parser.add_argument("--device", help="Device to be used", default="cuda") args = parser.parse_args(sys.argv[2:]) pretrained_dataset_id, pretrainer, pretrainer_config, pretrainer_fold = check_dataset_pretrain_config( args.pretrained_dataset_id, args.pretrainer, args.pretrainer_config, args.pretrainer_fold ) ( tta_data_dir, plan_dir, results_dir, pretrained_dataset_name, tta_dataset_name, ) = get_tta_folders( pretrained_dataset_id, int(args.tta_dataset_id), pretrainer, pretrainer_config, pretrainer_fold, ) now_str = datetime.now().strftime("%Y%m%d__%H_%M_%S") numbers = [ int(re.search(r"[0-9]+$", str(_path))[0]) for _path in results_dir.iterdir() ] if len(numbers) == 0: run_no = 0 else: run_no = torch.as_tensor(numbers).max().item() + 1 run_name = f"{now_str}_{randomname.get_name()}-{run_no}" with open(Path(plan_dir / "tta_plan.json"), "r") as f: config = json.load(f) with open( Path(plan_dir) / f"{pretrained_dataset_name}_label_mapping.json", "r" ) as f: pretrained_label_mapping = json.load(f) with open(Path(plan_dir) / f"{tta_dataset_name}_label_mapping.json", "r") as f: tta_dataset_label_mapping = json.load(f) label_mapping = generate_label_mapping( pretrained_label_mapping, tta_dataset_label_mapping ) modifier_fn_module = load_current_modifier_functions(plan_dir) device = torch.device(args.device) kwargs = dict( run_name=run_name, config=config, tta_data_dir=tta_data_dir, save_base_path=results_dir, label_mapping=label_mapping, modifier_fn_module=modifier_fn_module, device=device, )
if wandb_run_is_available():
7
2023-12-08 08:43:11+00:00
8k
tommy-xq/SA2VP
datasets.py
[ { "identifier": "RandomResizedCropAndInterpolationWithTwoPic", "path": "transforms.py", "snippet": "class RandomResizedCropAndInterpolationWithTwoPic:\n \"\"\"Crop the given PIL Image to random size and aspect ratio with random interpolation.\n\n A crop of random size (default: of 0.08 to 1.0) of the original size and a random\n aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This crop\n is finally resized to given size.\n This is popularly used to train the Inception networks.\n\n Args:\n size: expected output size of each edge\n scale: range of size of the origin size cropped\n ratio: range of aspect ratio of the origin aspect ratio cropped\n interpolation: Default: PIL.Image.BILINEAR\n \"\"\"\n\n def __init__(self, size, second_size=None, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.),\n interpolation='bilinear', second_interpolation='lanczos'):\n if isinstance(size, tuple):\n self.size = size\n else:\n self.size = (size, size)\n if second_size is not None:\n if isinstance(second_size, tuple):\n self.second_size = second_size\n else:\n self.second_size = (second_size, second_size)\n else:\n self.second_size = None\n if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):\n warnings.warn(\"range should be of kind (min, max)\")\n\n if interpolation == 'random':\n self.interpolation = _RANDOM_INTERPOLATION\n else:\n self.interpolation = _pil_interp(interpolation)\n self.second_interpolation = _pil_interp(second_interpolation)\n self.scale = scale\n self.ratio = ratio\n\n @staticmethod\n def get_params(img, scale, ratio):\n \"\"\"Get parameters for ``crop`` for a random sized crop.\n\n Args:\n img (PIL Image): Image to be cropped.\n scale (tuple): range of size of the origin size cropped\n ratio (tuple): range of aspect ratio of the origin aspect ratio cropped\n\n Returns:\n tuple: params (i, j, h, w) to be passed to ``crop`` for a random\n sized crop.\n \"\"\"\n area = img.size[0] * img.size[1]\n\n for attempt in range(10):\n target_area = random.uniform(*scale) * area\n log_ratio = (math.log(ratio[0]), math.log(ratio[1]))\n aspect_ratio = math.exp(random.uniform(*log_ratio))\n\n w = int(round(math.sqrt(target_area * aspect_ratio)))\n h = int(round(math.sqrt(target_area / aspect_ratio)))\n\n if w <= img.size[0] and h <= img.size[1]:\n i = random.randint(0, img.size[1] - h)\n j = random.randint(0, img.size[0] - w)\n return i, j, h, w\n\n # Fallback to central crop\n in_ratio = img.size[0] / img.size[1]\n if in_ratio < min(ratio):\n w = img.size[0]\n h = int(round(w / min(ratio)))\n elif in_ratio > max(ratio):\n h = img.size[1]\n w = int(round(h * max(ratio)))\n else: # whole image\n w = img.size[0]\n h = img.size[1]\n i = (img.size[1] - h) // 2\n j = (img.size[0] - w) // 2\n return i, j, h, w\n\n def __call__(self, img):\n \"\"\"\n Args:\n img (PIL Image): Image to be cropped and resized.\n\n Returns:\n PIL Image: Randomly cropped and resized image.\n \"\"\"\n i, j, h, w = self.get_params(img, self.scale, self.ratio)\n if isinstance(self.interpolation, (tuple, list)):\n interpolation = random.choice(self.interpolation)\n else:\n interpolation = self.interpolation\n if self.second_size is None:\n return F.resized_crop(img, i, j, h, w, self.size, interpolation)\n else:\n return F.resized_crop(img, i, j, h, w, self.size, interpolation), \\\n F.resized_crop(img, i, j, h, w, self.second_size, self.second_interpolation)\n\n def __repr__(self):\n if isinstance(self.interpolation, (tuple, list)):\n interpolate_str = ' '.join([_pil_interpolation_to_str[x] for x in 
self.interpolation])\n else:\n interpolate_str = _pil_interpolation_to_str[self.interpolation]\n format_string = self.__class__.__name__ + '(size={0}'.format(self.size)\n format_string += ', scale={0}'.format(tuple(round(s, 4) for s in self.scale))\n format_string += ', ratio={0}'.format(tuple(round(r, 4) for r in self.ratio))\n format_string += ', interpolation={0}'.format(interpolate_str)\n if self.second_size is not None:\n format_string += ', second_size={0}'.format(self.second_size)\n format_string += ', second_interpolation={0}'.format(_pil_interpolation_to_str[self.second_interpolation])\n format_string += ')'\n return format_string" }, { "identifier": "RandomResizedCropAndInterpolationWithTwoPicVal", "path": "transforms.py", "snippet": "class RandomResizedCropAndInterpolationWithTwoPicVal:\n \"\"\"Crop the given PIL Image to random size and aspect ratio with random interpolation.\n\n A crop of random size (default: of 0.08 to 1.0) of the original size and a random\n aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This crop\n is finally resized to given size.\n This is popularly used to train the Inception networks.\n\n Args:\n size: expected output size of each edge\n scale: range of size of the origin size cropped\n ratio: range of aspect ratio of the origin aspect ratio cropped\n interpolation: Default: PIL.Image.BILINEAR\n \"\"\"\n\n def __init__(self, size, second_size=None, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.),\n interpolation='bilinear', second_interpolation='lanczos'):\n if isinstance(size, tuple):\n self.size = size\n else:\n self.size = (size, size)\n if second_size is not None:\n if isinstance(second_size, tuple):\n self.second_size = second_size\n else:\n self.second_size = (second_size, second_size)\n else:\n self.second_size = None\n if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):\n warnings.warn(\"range should be of kind (min, max)\")\n\n if interpolation == 'random':\n self.interpolation = _RANDOM_INTERPOLATION\n else:\n self.interpolation = _pil_interp(interpolation)\n self.second_interpolation = _pil_interp(second_interpolation)\n self.scale = scale\n self.ratio = ratio\n\n @staticmethod\n def get_params(img, scale, ratio):\n \"\"\"Get parameters for ``crop`` for a random sized crop.\n\n Args:\n img (PIL Image): Image to be cropped.\n scale (tuple): range of size of the origin size cropped\n ratio (tuple): range of aspect ratio of the origin aspect ratio cropped\n\n Returns:\n tuple: params (i, j, h, w) to be passed to ``crop`` for a random\n sized crop.\n \"\"\"\n area = img.size[0] * img.size[1]\n\n for attempt in range(10):\n target_area = random.uniform(*scale) * area\n log_ratio = (math.log(ratio[0]), math.log(ratio[1]))\n aspect_ratio = math.exp(random.uniform(*log_ratio))\n\n w = int(round(math.sqrt(target_area * aspect_ratio)))\n h = int(round(math.sqrt(target_area / aspect_ratio)))\n\n if w <= img.size[0] and h <= img.size[1]:\n i = random.randint(0, img.size[1] - h)\n j = random.randint(0, img.size[0] - w)\n return i, j, h, w\n\n # Fallback to central crop\n in_ratio = img.size[0] / img.size[1]\n if in_ratio < min(ratio):\n w = img.size[0]\n h = int(round(w / min(ratio)))\n elif in_ratio > max(ratio):\n h = img.size[1]\n w = int(round(h * max(ratio)))\n else: # whole image\n w = img.size[0]\n h = img.size[1]\n i = (img.size[1] - h) // 2\n j = (img.size[0] - w) // 2\n return i, j, h, w\n\n def __call__(self, img):\n \"\"\"\n Args:\n img (PIL Image): Image to be cropped and resized.\n\n Returns:\n PIL Image: Randomly 
cropped and resized image.\n \"\"\"\n # i, j, h, w = self.get_params(img, self.scale, self.ratio)\n if isinstance(self.interpolation, (tuple, list)):\n interpolation = random.choice(self.interpolation)\n else:\n interpolation = self.interpolation\n # if self.second_size is None:\n # return F.resize(img, (256, 256), interpolation)\n # else:\n return F.resize(img, (256, 256), interpolation), F.resize(img, (128,128), self.second_interpolation)\n\n def __repr__(self):\n if isinstance(self.interpolation, (tuple, list)):\n interpolate_str = ' '.join([_pil_interpolation_to_str[x] for x in self.interpolation])\n else:\n interpolate_str = _pil_interpolation_to_str[self.interpolation]\n format_string = self.__class__.__name__ + '(size={0}'.format(self.size)\n format_string += ', scale={0}'.format(tuple(round(s, 4) for s in self.scale))\n format_string += ', ratio={0}'.format(tuple(round(r, 4) for r in self.ratio))\n format_string += ', interpolation={0}'.format(interpolate_str)\n if self.second_size is not None:\n format_string += ', second_size={0}'.format(self.second_size)\n format_string += ', second_interpolation={0}'.format(_pil_interpolation_to_str[self.second_interpolation])\n format_string += ')'\n return format_string" }, { "identifier": "ImageFolder", "path": "dataset_folder.py", "snippet": "class ImageFolder(DatasetFolder):\n \"\"\"A generic data loader where the images are arranged in this way: ::\n\n root/dog/xxx.png\n root/dog/xxy.png\n root/dog/xxz.png\n\n root/cat/123.png\n root/cat/nsdf3.png\n root/cat/asd932_.png\n\n Args:\n root (string): Root directory path.\n transform (callable, optional): A function/transform that takes in an PIL image\n and returns a transformed version. E.g, ``transforms.RandomCrop``\n target_transform (callable, optional): A function/transform that takes in the\n target and transforms it.\n loader (callable, optional): A function to load an image given its path.\n is_valid_file (callable, optional): A function that takes path of an Image file\n and check if the file is a valid file (used to check of corrupt files)\n\n Attributes:\n classes (list): List of the class names sorted alphabetically.\n class_to_idx (dict): Dict with items (class_name, class_index).\n imgs (list): List of (image path, class_index) tuples\n \"\"\"\n\n def __init__(\n self,\n root: str,\n transform: Optional[Callable] = None,\n target_transform: Optional[Callable] = None,\n loader: Callable[[str], Any] = default_loader,\n is_valid_file: Optional[Callable[[str], bool]] = None,\n ):\n super(ImageFolder, self).__init__(root, loader, IMG_EXTENSIONS if is_valid_file is None else None,\n transform=transform,\n target_transform=target_transform,\n is_valid_file=is_valid_file)\n self.imgs = self.samples" } ]
import os
import torch
import json
import PIL.Image
import random
import pickle
import numpy as np
import csv
import scipy.io as sio
from torchvision import datasets, transforms
from timm.data.constants import \
    IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
from transforms import RandomResizedCropAndInterpolationWithTwoPic, RandomResizedCropAndInterpolationWithTwoPicVal
from timm.data import create_transform
from dataset_folder import ImageFolder
from timm.data.transforms import str_to_interp_mode
from pathlib import Path
from typing import Any, Tuple, Callable, Optional
from torchvision.datasets.utils import verify_str_arg, download_and_extract_archive, check_integrity
from torchvision.datasets.vision import VisionDataset
from torchvision.datasets.folder import make_dataset
from PIL import Image
5,804
train_list_path = os.path.join(self.dataset_root, 'train.json') test_list_path = os.path.join(self.dataset_root, 'val.json') elif my_mode == 'trainval_test': train_list_path = os.path.join(self.dataset_root, 'train.json') test_list_path = os.path.join(self.dataset_root, 'test.json') # test else: train_list_path = None test_list_path = None self.samples = [] if train: with open(train_list_path, 'r') as f: content = json.load(f) for name in content: img_name = 'Images/'+name label = int(content[name])-1 self.samples.append((os.path.join(root,img_name), label)) else: with open(test_list_path, 'r') as f: content = json.load(f) for name in content: img_name = 'Images/'+name label = int(content[name])-1 self.samples.append((os.path.join(root,img_name), label)) class FGVC_car(datasets.folder.ImageFolder): def __init__(self, root, my_mode=None, train=True, transform=None, target_transform=None, mode=None,is_individual_prompt=False,**kwargs): self.dataset_root = root self.loader = datasets.folder.default_loader self.target_transform = None self.transform = transform if my_mode == 'train_val': train_list_path = os.path.join(self.dataset_root, 'train.json') test_list_path = os.path.join(self.dataset_root, 'val.json') elif my_mode == 'trainval_test': train_list_path = os.path.join(self.dataset_root, 'train.json') test_list_path = os.path.join(self.dataset_root, 'test.json') # test else: train_list_path = None test_list_path = None self.samples = [] if train: with open(train_list_path, 'r') as f: content = json.load(f) for name in content: img_name = name label = int(content[name])-1 self.samples.append((os.path.join(root,img_name), label)) else: with open(test_list_path, 'r') as f: content = json.load(f) for name in content: img_name = name label = int(content[name])-1 self.samples.append((os.path.join(root,img_name), label)) class DataAugmentationForBEiT(object): def __init__(self, args): imagenet_default_mean_and_std = args.imagenet_default_mean_and_std mean = IMAGENET_INCEPTION_MEAN if not imagenet_default_mean_and_std else IMAGENET_DEFAULT_MEAN std = IMAGENET_INCEPTION_STD if not imagenet_default_mean_and_std else IMAGENET_DEFAULT_STD self.common_transform = transforms.Compose([ transforms.ColorJitter(0.4, 0.4, 0.4), transforms.RandomHorizontalFlip(p=0.5), RandomResizedCropAndInterpolationWithTwoPic( size=args.input_size, second_size=args.second_input_size, interpolation=args.train_interpolation, second_interpolation=args.second_interpolation, ), ]) self.patch_transform = transforms.Compose([ transforms.ToTensor(), transforms.Normalize( mean=torch.tensor(mean), std=torch.tensor(std)) ]) if args.discrete_vae_type == "dall-e": self.visual_token_transform = transforms.Compose([ transforms.ToTensor(), map_pixels, ]) elif args.discrete_vae_type == "customized": self.visual_token_transform = transforms.Compose([ transforms.ToTensor(), transforms.Normalize( mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, ), ]) else: raise NotImplementedError() def __call__(self, image): for_patches, for_visual_tokens = self.common_transform(image) return \ self.patch_transform(for_patches), self.visual_token_transform(for_visual_tokens) def __repr__(self): repr = "(DataAugmentationForBEiT,\n" repr += " common_transform = %s,\n" % str(self.common_transform) repr += " patch_transform = %s,\n" % str(self.patch_transform) repr += " visual_tokens_transform = %s,\n" % str(self.visual_token_transform) repr += ")" return repr class DataAugmentationForBEiT_val(object): def __init__(self, args): 
imagenet_default_mean_and_std = args.imagenet_default_mean_and_std mean = IMAGENET_INCEPTION_MEAN if not imagenet_default_mean_and_std else IMAGENET_DEFAULT_MEAN std = IMAGENET_INCEPTION_STD if not imagenet_default_mean_and_std else IMAGENET_DEFAULT_STD self.common_transform = transforms.Compose([
# -------------------------------------------------------- # SA2VP: Spatially Aligned-and-Adapted Visual Prompt code # reference: # BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254) # Github source: https://github.com/microsoft/unilm/tree/master/beit # Based on timm # https://github.com/rwightman/pytorch-image-models/tree/master/timm # --------------------------------------------------------' # for food 101 # for add new datesets ========================= class DTD(VisionDataset): """`Describable Textures Dataset (DTD) <https://www.robots.ox.ac.uk/~vgg/data/dtd/>`_. Args: root (string): Root directory of the dataset. split (string, optional): The dataset split, supports ``"train"`` (default), ``"val"``, or ``"test"``. partition (int, optional): The dataset partition. Should be ``1 <= partition <= 10``. Defaults to ``1``. .. note:: The partition only changes which split each image belongs to. Thus, regardless of the selected partition, combining all splits will result in all images. transform (callable, optional): A function/transform that takes in a PIL image and returns a transformed version. E.g, ``transforms.RandomCrop``. target_transform (callable, optional): A function/transform that takes in the target and transforms it. download (bool, optional): If True, downloads the dataset from the internet and puts it in root directory. If dataset is already downloaded, it is not downloaded again. Default is False. """ _URL = "https://www.robots.ox.ac.uk/~vgg/data/dtd/download/dtd-r1.0.1.tar.gz" _MD5 = "fff73e5086ae6bdbea199a49dfb8a4c1" def __init__( self, root: str, split: str = "train", partition: int = 1, transform: Optional[Callable] = None, target_transform: Optional[Callable] = None, download: bool = False, ) -> None: self._split = verify_str_arg(split, "split", ("train", "val", "test")) if not isinstance(partition, int) and not (1 <= partition <= 10): raise ValueError( f"Parameter 'partition' should be an integer with `1 <= partition <= 10`, " f"but got {partition} instead" ) self._partition = partition super().__init__(root, transform=transform, target_transform=target_transform) self._base_folder = Path(root) self._data_folder = self._base_folder / "dtd" self._meta_folder = self._data_folder / "labels" self._images_folder = self._data_folder / "images" if download: self._download() if not self._check_exists(): raise RuntimeError("Dataset not found. 
You can use download=True to download it") self._image_files = [] classes = [] with open(self._meta_folder / f"{self._split}{self._partition}.txt") as file: for line in file: cls, name = line.strip().split("/") self._image_files.append(self._images_folder.joinpath(cls, name)) classes.append(cls) self.classes = sorted(set(classes)) self.class_to_idx = dict(zip(self.classes, range(len(self.classes)))) self._labels = [self.class_to_idx[cls] for cls in classes] def __len__(self) -> int: return len(self._image_files) def __getitem__(self, idx): image_file, label = self._image_files[idx], self._labels[idx] image = PIL.Image.open(image_file).convert("RGB") if self.transform: image = self.transform(image) if self.target_transform: label = self.target_transform(label) # sample = { # "image": image, # "label": label # } return image, label def extra_repr(self) -> str: return f"split={self._split}, partition={self._partition}" def _check_exists(self) -> bool: return os.path.exists(self._data_folder) and os.path.isdir(self._data_folder) def _download(self) -> None: if self._check_exists(): return download_and_extract_archive(self._URL, download_root=str(self._base_folder), md5=self._MD5) classes = [ 'red and white circle 20 kph speed limit', 'red and white circle 30 kph speed limit', 'red and white circle 50 kph speed limit', 'red and white circle 60 kph speed limit', 'red and white circle 70 kph speed limit', 'red and white circle 80 kph speed limit', 'end / de-restriction of 80 kph speed limit', 'red and white circle 100 kph speed limit', 'red and white circle 120 kph speed limit', 'red and white circle red car and black car no passing', 'red and white circle red truck and black car no passing', 'red and white triangle road intersection warning', 'white and yellow diamond priority road', 'red and white upside down triangle yield right-of-way', 'stop', 'empty red and white circle', 'red and white circle no truck entry', 'red circle with white horizonal stripe no entry', 'red and white triangle with exclamation mark warning', 'red and white triangle with black left curve approaching warning', 'red and white triangle with black right curve approaching warning', 'red and white triangle with black double curve approaching warning', 'red and white triangle rough / bumpy road warning', 'red and white triangle car skidding / slipping warning', 'red and white triangle with merging / narrow lanes warning', 'red and white triangle with person digging / construction / road work warning', 'red and white triangle with traffic light approaching warning', 'red and white triangle with person walking warning', 'red and white triangle with child and person walking warning', 'red and white triangle with bicyle warning', 'red and white triangle with snowflake / ice warning', 'red and white triangle with deer warning', 'white circle with gray strike bar no speed limit', 'blue circle with white right turn arrow mandatory', 'blue circle with white left turn arrow mandatory', 'blue circle with white forward arrow mandatory', 'blue circle with white forward or right turn arrow mandatory', 'blue circle with white forward or left turn arrow mandatory', 'blue circle with white keep right arrow mandatory', 'blue circle with white keep left arrow mandatory', 'blue circle with white arrows indicating a traffic circle', 'white circle with gray strike bar indicating no passing for cars has ended', 'white circle with gray strike bar indicating no passing for trucks has ended', ] class GTSRB(VisionDataset): """`German Traffic Sign 
Recognition Benchmark (GTSRB) <https://benchmark.ini.rub.de/>`_ Dataset. Args: root (string): Root directory of the dataset. split (string, optional): The dataset split, supports ``"train"`` (default), or ``"test"``. transform (callable, optional): A function/transform that takes in an PIL image and returns a transformed version. E.g, ``transforms.RandomCrop``. target_transform (callable, optional): A function/transform that takes in the target and transforms it. download (bool, optional): If True, downloads the dataset from the internet and puts it in root directory. If dataset is already downloaded, it is not downloaded again. """ def __init__( self, root: str, # args, split: str = "train", percentage: float = 0.8, transform: Optional[Callable] = None, target_transform: Optional[Callable] = None, download: bool = False, ) -> None: super().__init__(root, transform=transform, target_transform=target_transform) self._split = verify_str_arg(split, "split", ("train", "val", "test")) self._base_folder = Path(root) / "gtsrb" self._target_folder = ( self._base_folder / "GTSRB" / ("Training" if self._split in ["train", "val"] else "Final_Test/Images") ) if download: self.download() if not self._check_exists(): raise RuntimeError("Dataset not found. You can use download=True to download it") if self._split in ["train", "val"]: samples = make_dataset(str(self._target_folder), extensions=(".ppm",)) else: with open(self._base_folder / "GT-final_test.csv") as csv_file: samples = [ (str(self._target_folder / row["Filename"]), int(row["ClassId"])) for row in csv.DictReader(csv_file, delimiter=";", skipinitialspace=True) ] # self._samples = samples # self.transform = transform # self.target_transform = target_transform if split in ["train", "val"]: random.shuffle(samples) else: self._samples = samples if split == "train": self._samples = samples[:int(percentage*len(samples))] if split == "val": self._samples = samples[int(percentage*len(samples)):] self.classes = ['a zoomed in photo of a {} traffic sign.'.format(class_name) \ for class_name in classes] def __len__(self) -> int: return len(self._samples) def __getitem__(self, index: int) -> Tuple[Any, Any]: path, target = self._samples[index] sample = PIL.Image.open(path).convert("RGB") if self.transform is not None: sample = self.transform(sample) if self.target_transform is not None: target = self.target_transform(target) #data = { #"image": sample, #"label": target #} return sample,target def _check_exists(self) -> bool: return self._target_folder.is_dir() def download(self) -> None: if self._check_exists(): return base_url = "https://sid.erda.dk/public/archives/daaeac0d7ce1152aea9b61d9f1e19370/" if self._split in ["train", "val"]: download_and_extract_archive( f"{base_url}GTSRB-Training_fixed.zip", download_root=str(self._base_folder), md5="513f3c79a4c5141765e10e952eaa2478", ) else: download_and_extract_archive( f"{base_url}GTSRB_Final_Test_Images.zip", download_root=str(self._base_folder), md5="c7e4e6327067d32654124b0fe9e82185", ) download_and_extract_archive( f"{base_url}GTSRB_Final_Test_GT.zip", download_root=str(self._base_folder), md5="fe31e9c9270bbcd7b84b7f21a9d9d9e5", ) class Food101(VisionDataset): """`The Food-101 Data Set <https://data.vision.ee.ethz.ch/cvl/datasets_extra/food-101/>`_. The Food-101 is a challenging data set of 101 food categories, with 101'000 images. For each class, 250 manually reviewed test images are provided as well as 750 training images. 
On purpose, the training images were not cleaned, and thus still contain some amount of noise. This comes mostly in the form of intense colors and sometimes wrong labels. All images were rescaled to have a maximum side length of 512 pixels. Args: root (string): Root directory of the dataset. split (string, optional): The dataset split, supports ``"train"`` (default) and ``"test"``. transform (callable, optional): A function/transform that takes in an PIL image and returns a transformed version. E.g, ``transforms.RandomCrop``. target_transform (callable, optional): A function/transform that takes in the target and transforms it. download (bool, optional): If True, downloads the dataset from the internet and puts it in root directory. If dataset is already downloaded, it is not downloaded again. Default is False. """ _URL = "http://data.vision.ee.ethz.ch/cvl/food-101.tar.gz" _MD5 = "85eeb15f3717b99a5da872d97d918f87" def __init__( self, root: str, # args, split: str = "train", percentage: float = 0.8, transform: Optional[Callable] = None, target_transform: Optional[Callable] = None, download: bool = False, ) -> None: super().__init__(root, transform=transform, target_transform=target_transform) self._split = verify_str_arg(split, "split", ("train", "val", "test")) self._base_folder = Path(self.root) / "food-101" self._meta_folder = self._base_folder / "meta" self._images_folder = self._base_folder / "images" if download: self._download() if not self._check_exists(): raise RuntimeError("Dataset not found. You can use download=True to download it") self._labels = [] self._image_files = [] split_name = "test" if split == "test" else "train" with open(self._meta_folder / f"{split_name}.json") as f: metadata = json.loads(f.read()) self.classes = sorted(metadata.keys()) self.class_to_idx = dict(zip(self.classes, range(len(self.classes)))) for class_label, im_rel_paths in metadata.items(): self._labels += [self.class_to_idx[class_label]] * len(im_rel_paths) self._image_files += [ self._images_folder.joinpath(*f"{im_rel_path}.jpg".split("/")) for im_rel_path in im_rel_paths ] if split in ["train", "val"]: data_zip = list(zip(self._labels, self._image_files)) random.shuffle(data_zip) self._labels[:], self._image_files[:] = zip(*data_zip) del data_zip if split == "train": self._labels = self._labels[:int(percentage*len(self._labels))] self._image_files = self._image_files[:int(percentage*len(self._image_files))] if split == "val": self._labels = self._labels[int(percentage*len(self._labels)):] self._image_files = self._image_files[int(percentage*len(self._image_files)):] def __len__(self) -> int: return len(self._image_files) def __getitem__(self, idx) -> Tuple[Any, Any]: image_file, label = self._image_files[idx], self._labels[idx] image = PIL.Image.open(image_file).convert("RGB") if self.transform: image = self.transform(image) if self.target_transform: label = self.target_transform(label) sample = { "image": image, "label": label } return image, label def extra_repr(self) -> str: return f"split={self._split}" def _check_exists(self) -> bool: return all(folder.exists() and folder.is_dir() for folder in (self._meta_folder, self._images_folder)) def _download(self) -> None: if self._check_exists(): return download_and_extract_archive(self._URL, download_root=self.root, md5=self._MD5) class CIFAR10(VisionDataset): """`CIFAR10 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset. 
Args: root (string): Root directory of dataset where directory ``cifar-10-batches-py`` exists or will be saved to if download is set to True. train (bool, optional): If True, creates dataset from training set, otherwise creates from test set. transform (callable, optional): A function/transform that takes in an PIL image and returns a transformed version. E.g, ``transforms.RandomCrop`` target_transform (callable, optional): A function/transform that takes in the target and transforms it. download (bool, optional): If true, downloads the dataset from the internet and puts it in root directory. If dataset is already downloaded, it is not downloaded again. """ base_folder = "cifar-10-batches-py" url = "https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz" filename = "cifar-10-python.tar.gz" tgz_md5 = "c58f30108f718f92721af3b95e74349a" train_list = [ ["data_batch_1", "c99cafc152244af753f735de768cd75f"], ["data_batch_2", "d4bba439e000b95fd0a9bffe97cbabec"], ["data_batch_3", "54ebc095f3ab1f0389bbae665268c751"], ["data_batch_4", "634d18415352ddfa80567beed471001a"], ["data_batch_5", "482c414d41f54cd18b22e5b47cb7c3cb"], ] test_list = [ ["test_batch", "40351d587109b95175f43aff81a1287e"], ] meta = { "filename": "batches.meta", "key": "label_names", "md5": "5ff9c542aee3614f3951f8cda6e48888", } def __init__( self, root: str, # args, split: str = "train", percentage: float = 0.8, transform: Optional[Callable] = None, target_transform: Optional[Callable] = None, download: bool = False, ) -> None: super().__init__(root, transform=transform, target_transform=target_transform) self.split = split # training set or test set if download: self.download() if not self._check_integrity(): raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it") if self.split == "train" or self.split == "val": downloaded_list = self.train_list else: downloaded_list = self.test_list self.data: Any = [] self.targets = [] # now load the picked numpy arrays for file_name, checksum in downloaded_list: file_path = os.path.join(self.root, self.base_folder, file_name) with open(file_path, "rb") as f: entry = pickle.load(f, encoding="latin1") self.data.append(entry["data"]) if "labels" in entry: self.targets.extend(entry["labels"]) else: self.targets.extend(entry["fine_labels"]) self.data = np.vstack(self.data).reshape(-1, 3, 32, 32) self.data = self.data.transpose((0, 2, 3, 1)) # convert to HWC if self.split == "train": self.data = self.data[:int(percentage*len(self.data))] self.targets = self.targets[:int(percentage*len(self.targets))] if self.split == "val": self.data = self.data[int(percentage*len(self.data)):] self.targets = self.targets[int(percentage*len(self.targets)):] self._load_meta() def _load_meta(self) -> None: path = os.path.join(self.root, self.base_folder, self.meta["filename"]) if not check_integrity(path, self.meta["md5"]): raise RuntimeError("Dataset metadata file not found or corrupted. You can use download=True to download it") with open(path, "rb") as infile: data = pickle.load(infile, encoding="latin1") self.classes = data[self.meta["key"]] self.class_to_idx = {_class: i for i, _class in enumerate(self.classes)} def __getitem__(self, index: int) -> Tuple[Any, Any]: """ Args: index (int): Index Returns: tuple: (image, target) where target is index of the target class. 
""" img, target = self.data[index], self.targets[index] # doing this so that it is consistent with all other datasets # to return a PIL Image img = Image.fromarray(img) if self.transform is not None: img = self.transform(img) if self.target_transform is not None: target = self.target_transform(target) #sample = { #"image": img, #"label": target #} return img, target def __len__(self) -> int: return len(self.data) def _check_integrity(self) -> bool: root = self.root for fentry in self.train_list + self.test_list: filename, md5 = fentry[0], fentry[1] fpath = os.path.join(root, self.base_folder, filename) if not check_integrity(fpath, md5): return False return True def download(self) -> None: if self._check_integrity(): logger.info("Files already downloaded and verified") return download_and_extract_archive(self.url, self.root, filename=self.filename, md5=self.tgz_md5) def extra_repr(self) -> str: split = self.split return f"Split: {split}" class CIFAR100(CIFAR10): """`CIFAR100 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset. This is a subclass of the `CIFAR10` Dataset. """ base_folder = "cifar-100-python" url = "https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz" filename = "cifar-100-python.tar.gz" tgz_md5 = "eb9058c3a382ffc7106e4002c42a8d85" train_list = [ ["train", "16019d7e3df5f24257cddd939b257f8d"], ] test_list = [ ["test", "f0ef6b0ae62326f3e7ffdfab6717acfc"], ] meta = { "filename": "meta", "key": "fine_label_names", "md5": "7973b15100ade9c7d40fb424638fde48", } class SVHN(VisionDataset): """`SVHN <http://ufldl.stanford.edu/housenumbers/>`_ Dataset. Note: The SVHN dataset assigns the label `10` to the digit `0`. However, in this Dataset, we assign the label `0` to the digit `0` to be compatible with PyTorch loss functions which expect the class labels to be in the range `[0, C-1]` .. warning:: This class needs `scipy <https://docs.scipy.org/doc/>`_ to load data from `.mat` format. Args: root (string): Root directory of dataset where directory ``SVHN`` exists. split (string): One of {'train', 'test', 'extra'}. Accordingly dataset is selected. 'extra' is Extra training set. transform (callable, optional): A function/transform that takes in an PIL image and returns a transformed version. E.g, ``transforms.RandomCrop`` target_transform (callable, optional): A function/transform that takes in the target and transforms it. download (bool, optional): If true, downloads the dataset from the internet and puts it in root directory. If dataset is already downloaded, it is not downloaded again. 
""" split_list = { "train": [ "http://ufldl.stanford.edu/housenumbers/train_32x32.mat", "train_32x32.mat", "e26dedcc434d2e4c54c9b2d4a06d8373", ], "val": [ "http://ufldl.stanford.edu/housenumbers/train_32x32.mat", "train_32x32.mat", "e26dedcc434d2e4c54c9b2d4a06d8373", ], "test": [ "http://ufldl.stanford.edu/housenumbers/test_32x32.mat", "test_32x32.mat", "eb5a983be6a315427106f1b164d9cef3", ], "extra": [ "http://ufldl.stanford.edu/housenumbers/extra_32x32.mat", "extra_32x32.mat", "a93ce644f1a588dc4d68dda5feec44a7", ], } def __init__( self, root: str, # args, split: str = "train", percentage: float = 0.8, transform: Optional[Callable] = None, target_transform: Optional[Callable] = None, download: bool = False, ) -> None: super().__init__(root, transform=transform, target_transform=target_transform) self.split = verify_str_arg(split, "split", tuple(self.split_list.keys())) self.url = self.split_list[split][0] self.filename = self.split_list[split][1] self.file_md5 = self.split_list[split][2] if download: self.download() if not self._check_integrity(): raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it") # import here rather than at top of file because this is # an optional dependency for torchvision # reading(loading) mat file as array loaded_mat = sio.loadmat(os.path.join(self.root, self.filename)) self.data = loaded_mat["X"] # loading from the .mat file gives an np array of type np.uint8 # converting to np.int64, so that we have a LongTensor after # the conversion from the numpy array # the squeeze is needed to obtain a 1D tensor self.labels = loaded_mat["y"].astype(np.int64).squeeze() # the svhn dataset assigns the class label "10" to the digit 0 # this makes it inconsistent with several loss functions # which expect the class labels to be in the range [0, C-1] np.place(self.labels, self.labels == 10, 0) self.data = np.transpose(self.data, (3, 2, 0, 1)) if split == "train": self.labels = self.labels[:int(percentage*len(self.labels))] self.data = self.data[:int(percentage*len(self.data))] if split == "val": self.labels = self.labels[int(percentage*len(self.labels)):] self.data = self.data[int(percentage*len(self.data)):] self.classes = [str(class_name) for class_name in sorted(list(set(self.labels)))] def __getitem__(self, index: int) -> Tuple[Any, Any]: """ Args: index (int): Index Returns: tuple: (image, target) where target is index of the target class. 
""" img, target = self.data[index], int(self.labels[index]) # doing this so that it is consistent with all other datasets # to return a PIL Image img = Image.fromarray(np.transpose(img, (1, 2, 0))) if self.transform is not None: img = self.transform(img) if self.target_transform is not None: target = self.target_transform(target) #sample = { #"image": img, #"label": target #} return img, target def __len__(self) -> int: return len(self.data) def _check_integrity(self) -> bool: root = self.root md5 = self.split_list[self.split][2] fpath = os.path.join(root, self.filename) return check_integrity(fpath, md5) def download(self) -> None: md5 = self.split_list[self.split][2] download_url(self.url, self.root, self.filename, md5) def extra_repr(self) -> str: return "Split: {split}".format(**self.__dict__) # end ========================= """ class Food101(VisionDataset): _URL = "http://data.vision.ee.ethz.ch/cvl/food-101.tar.gz" _MD5 = "85eeb15f3717b99a5da872d97d918f87" def __init__( self, root: str, split: str = "train", transform: Optional[Callable] = None, target_transform: Optional[Callable] = None, download: bool = False, ) -> None: super().__init__(root, transform=transform, target_transform=target_transform) self._split = verify_str_arg(split, "split", ("train", "test")) self._base_folder = Path(self.root) / "food-101" self._meta_folder = self._base_folder / "meta" self._images_folder = self._base_folder / "images" if download: self._download() if not self._check_exists(): raise RuntimeError("Dataset not found. You can use download=True to download it") self._labels = [] self._image_files = [] with open(self._meta_folder / f"{split}.json") as f: metadata = json.loads(f.read()) self.classes = sorted(metadata.keys()) self.class_to_idx = dict(zip(self.classes, range(len(self.classes)))) for class_label, im_rel_paths in metadata.items(): self._labels += [self.class_to_idx[class_label]] * len(im_rel_paths) self._image_files += [ self._images_folder.joinpath(*f"{im_rel_path}.jpg".split("/")) for im_rel_path in im_rel_paths ] def __len__(self) -> int: return len(self._image_files) def __getitem__(self, idx) -> Tuple[Any, Any]: image_file, label = self._image_files[idx], self._labels[idx] image = PIL.Image.open(image_file).convert("RGB") if self.transform: image = self.transform(image) if self.target_transform: label = self.target_transform(label) return image, label def extra_repr(self) -> str: return f"split={self._split}" def _check_exists(self) -> bool: return all(folder.exists() and folder.is_dir() for folder in (self._meta_folder, self._images_folder)) def _download(self) -> None: if self._check_exists(): return download_and_extract_archive(self._URL, download_root=self.root, md5=self._MD5) """ class VTAB(datasets.folder.ImageFolder): def __init__(self, root, my_mode=None, train=True, transform=None, target_transform=None, mode=None,is_individual_prompt=False,**kwargs): self.dataset_root = root self.loader = datasets.folder.default_loader self.target_transform = None self.transform = transform if my_mode == 'train_val': train_list_path = os.path.join(self.dataset_root, 'train800.txt') test_list_path = os.path.join(self.dataset_root, 'val200.txt') elif my_mode == 'trainval_test': train_list_path = os.path.join(self.dataset_root, 'train800val200.txt') test_list_path = os.path.join(self.dataset_root, 'test.txt') # test else: train_list_path = os.path.join(self.dataset_root, 'train800val200.txt') test_list_path = os.path.join(self.dataset_root, 'val200.txt') self.samples = [] if train: with 
open(train_list_path, 'r') as f: for line in f: img_name = line.split(' ')[0] label = int(line.split(' ')[1]) self.samples.append((os.path.join(root,img_name), label)) else: with open(test_list_path, 'r') as f: for line in f: img_name = line.split(' ')[0] label = int(line.split(' ')[1]) self.samples.append((os.path.join(root,img_name), label)) class FGVC_cub(datasets.folder.ImageFolder): def __init__(self, root, my_mode=None, train=True, transform=None, target_transform=None, mode=None,is_individual_prompt=False,**kwargs): self.dataset_root = root self.loader = datasets.folder.default_loader self.target_transform = None self.transform = transform if my_mode == 'train_val': train_list_path = os.path.join(self.dataset_root, 'train.json') test_list_path = os.path.join(self.dataset_root, 'val.json') elif my_mode == 'trainval_test': train_list_path = os.path.join(self.dataset_root, 'train.json') test_list_path = os.path.join(self.dataset_root, 'test.json') # test else: train_list_path = None test_list_path = None self.samples = [] if train: with open(train_list_path, 'r') as f: content = json.load(f) for name in content: img_name = 'images/'+name label = int(content[name])-1 self.samples.append((os.path.join(root,img_name), label)) else: with open(test_list_path, 'r') as f: content = json.load(f) for name in content: img_name = 'images/'+name label = int(content[name])-1 self.samples.append((os.path.join(root,img_name), label)) class FGVC_bird(datasets.folder.ImageFolder): def __init__(self, root, my_mode=None, train=True, transform=None, target_transform=None, mode=None,is_individual_prompt=False,**kwargs): self.dataset_root = root self.loader = datasets.folder.default_loader self.target_transform = None self.transform = transform if my_mode == 'train_val': train_list_path = os.path.join(self.dataset_root, 'train.json') test_list_path = os.path.join(self.dataset_root, 'val.json') elif my_mode == 'trainval_test': train_list_path = os.path.join(self.dataset_root, 'train.json') test_list_path = os.path.join(self.dataset_root, 'test.json') # test else: train_list_path = None test_list_path = None self.samples = [] if train: with open(train_list_path, 'r') as f: content = json.load(f) for name in content: img_name = 'images/'+name label = int(content[name]) self.samples.append((os.path.join(root,img_name), label)) else: with open(test_list_path, 'r') as f: content = json.load(f) for name in content: img_name = 'images/'+name label = int(content[name]) self.samples.append((os.path.join(root,img_name), label)) class FGVC_flower(datasets.folder.ImageFolder): def __init__(self, root, my_mode=None, train=True, transform=None, target_transform=None, mode=None,is_individual_prompt=False,**kwargs): self.dataset_root = root self.loader = datasets.folder.default_loader self.target_transform = None self.transform = transform if my_mode == 'train_val': train_list_path = os.path.join(self.dataset_root, 'train.json') test_list_path = os.path.join(self.dataset_root, 'val.json') elif my_mode == 'trainval_test': train_list_path = os.path.join(self.dataset_root, 'train.json') test_list_path = os.path.join(self.dataset_root, 'test.json') # test else: train_list_path = None test_list_path = None self.samples = [] if train: with open(train_list_path, 'r') as f: content = json.load(f) for name in content: img_name = name label = int(content[name])-1 self.samples.append((os.path.join(root,img_name), label)) else: with open(test_list_path, 'r') as f: content = json.load(f) for name in content: img_name = name label = 
int(content[name])-1 self.samples.append((os.path.join(root,img_name), label)) class FGVC_dog(datasets.folder.ImageFolder): def __init__(self, root, my_mode=None, train=True, transform=None, target_transform=None, mode=None,is_individual_prompt=False,**kwargs): self.dataset_root = root self.loader = datasets.folder.default_loader self.target_transform = None self.transform = transform if my_mode == 'train_val': train_list_path = os.path.join(self.dataset_root, 'train.json') test_list_path = os.path.join(self.dataset_root, 'val.json') elif my_mode == 'trainval_test': train_list_path = os.path.join(self.dataset_root, 'train.json') test_list_path = os.path.join(self.dataset_root, 'test.json') # test else: train_list_path = None test_list_path = None self.samples = [] if train: with open(train_list_path, 'r') as f: content = json.load(f) for name in content: img_name = 'Images/'+name label = int(content[name])-1 self.samples.append((os.path.join(root,img_name), label)) else: with open(test_list_path, 'r') as f: content = json.load(f) for name in content: img_name = 'Images/'+name label = int(content[name])-1 self.samples.append((os.path.join(root,img_name), label)) class FGVC_car(datasets.folder.ImageFolder): def __init__(self, root, my_mode=None, train=True, transform=None, target_transform=None, mode=None,is_individual_prompt=False,**kwargs): self.dataset_root = root self.loader = datasets.folder.default_loader self.target_transform = None self.transform = transform if my_mode == 'train_val': train_list_path = os.path.join(self.dataset_root, 'train.json') test_list_path = os.path.join(self.dataset_root, 'val.json') elif my_mode == 'trainval_test': train_list_path = os.path.join(self.dataset_root, 'train.json') test_list_path = os.path.join(self.dataset_root, 'test.json') # test else: train_list_path = None test_list_path = None self.samples = [] if train: with open(train_list_path, 'r') as f: content = json.load(f) for name in content: img_name = name label = int(content[name])-1 self.samples.append((os.path.join(root,img_name), label)) else: with open(test_list_path, 'r') as f: content = json.load(f) for name in content: img_name = name label = int(content[name])-1 self.samples.append((os.path.join(root,img_name), label)) class DataAugmentationForBEiT(object): def __init__(self, args): imagenet_default_mean_and_std = args.imagenet_default_mean_and_std mean = IMAGENET_INCEPTION_MEAN if not imagenet_default_mean_and_std else IMAGENET_DEFAULT_MEAN std = IMAGENET_INCEPTION_STD if not imagenet_default_mean_and_std else IMAGENET_DEFAULT_STD self.common_transform = transforms.Compose([ transforms.ColorJitter(0.4, 0.4, 0.4), transforms.RandomHorizontalFlip(p=0.5), RandomResizedCropAndInterpolationWithTwoPic( size=args.input_size, second_size=args.second_input_size, interpolation=args.train_interpolation, second_interpolation=args.second_interpolation, ), ]) self.patch_transform = transforms.Compose([ transforms.ToTensor(), transforms.Normalize( mean=torch.tensor(mean), std=torch.tensor(std)) ]) if args.discrete_vae_type == "dall-e": self.visual_token_transform = transforms.Compose([ transforms.ToTensor(), map_pixels, ]) elif args.discrete_vae_type == "customized": self.visual_token_transform = transforms.Compose([ transforms.ToTensor(), transforms.Normalize( mean=IMAGENET_INCEPTION_MEAN, std=IMAGENET_INCEPTION_STD, ), ]) else: raise NotImplementedError() def __call__(self, image): for_patches, for_visual_tokens = self.common_transform(image) return \ self.patch_transform(for_patches), 
self.visual_token_transform(for_visual_tokens) def __repr__(self): repr = "(DataAugmentationForBEiT,\n" repr += " common_transform = %s,\n" % str(self.common_transform) repr += " patch_transform = %s,\n" % str(self.patch_transform) repr += " visual_tokens_transform = %s,\n" % str(self.visual_token_transform) repr += ")" return repr class DataAugmentationForBEiT_val(object): def __init__(self, args): imagenet_default_mean_and_std = args.imagenet_default_mean_and_std mean = IMAGENET_INCEPTION_MEAN if not imagenet_default_mean_and_std else IMAGENET_DEFAULT_MEAN std = IMAGENET_INCEPTION_STD if not imagenet_default_mean_and_std else IMAGENET_DEFAULT_STD self.common_transform = transforms.Compose([
RandomResizedCropAndInterpolationWithTwoPicVal(
1
2023-12-12 13:19:17+00:00
8k
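The record above centers on a BEiT-style augmentation pipeline in which a single random-resized crop is emitted at two resolutions: one copy for the patch encoder and a smaller copy for the discrete visual tokenizer. Below is a minimal usage sketch of that two-output transform; the 224/112 sizes, the ImageNet normalization constants, and the surrounding Compose chain are illustrative assumptions rather than values taken from this record.

# Hedged usage sketch: wiring RandomResizedCropAndInterpolationWithTwoPic the way
# DataAugmentationForBEiT does; sizes and normalization constants are assumptions.
import torch
from torchvision import transforms
from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from transforms import RandomResizedCropAndInterpolationWithTwoPic  # class shown in this record

common_transform = transforms.Compose([
    transforms.ColorJitter(0.4, 0.4, 0.4),
    transforms.RandomHorizontalFlip(p=0.5),
    # size / second_size are assumed values; interpolations fall back to the class defaults
    RandomResizedCropAndInterpolationWithTwoPic(size=224, second_size=112),
])
patch_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=torch.tensor(IMAGENET_DEFAULT_MEAN),
                         std=torch.tensor(IMAGENET_DEFAULT_STD)),
])
token_transform = transforms.ToTensor()

def augment(pil_image):
    # The common transform crops once and returns the same region at two resolutions,
    # so the patch view and the token view stay spatially aligned.
    for_patches, for_tokens = common_transform(pil_image)
    return patch_transform(for_patches), token_transform(for_tokens)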
crashdev226/freelancer-create-account-bot
bot_create.py
[ { "identifier": "FreelancerBot", "path": "FreelancerBot.py", "snippet": "class FreelancerBot:\n # Constructor\n def __init__(self):\n pass\n def create(self): \n profile = None\n with open(\"./profile.json\", \"r+\") as file:\n profile = json.load(file)[\"freelancer\"]\n print(profile[\"skills\"])\n pag.FAILSAFE = False\n if pag.confirm(\"Are you ready? Please go to browser.\") != \"OK\":\n exit()\n pag.hotkey(\"ctrl\", \"t\")\n pag.typewrite(\"freelancer.com\\n\")\n time.sleep(3)\n pag.click(1348, 107)\n pag.hotkey(\"ctrl\", \"t\")\n pag.typewrite(\"yopmail.com\\n\")\n time.sleep(3)\n pag.click(523, 552)\n time.sleep(2)\n\n pag.click(831, 572)\n time.sleep(1)\n pag.click(1289, 710)\n time.sleep(1)\n pag.click(1073, 570)\n time.sleep(0.5)\n pag.hotkey(\"ctrl\", \"shift\", \"tab\")\n\n time.sleep(0.5)\n pag.click(870, 396)\n pag.hotkey(\"ctrl\", \"v\")\n pag.click(859, 477)\n pag.typewrite(\"pwd1234!@#$\")\n time.sleep(0.5)\n pag.click(807, 544)\n time.sleep(0.5)\n pag.click(807, 544)\n time.sleep(0.5)\n pag.click(959, 611)\n\n # Page 2\n time.sleep(1.5)\n pag.click(912, 395)\n time.sleep(1)\n pag.click(939, 484)\n time.sleep(1)\n pag.click(963, 336)\n if pag.confirm(\"Continue?\") != \"OK\":\n exit()\n time.sleep(5)\n # pag.click(557, 450)\n # time.sleep(4)\n # pag.click(557, 450)\n\n # skill input for loop\n for skill in profile[\"skills\"]:\n time.sleep(0.5)\n pag.click(472, 295, 3)\n pag.typewrite(skill)\n time.sleep(1)\n pag.click(853, 444)\n if pag.confirm(\"Continue?\") != \"OK\":\n exit()\n pag.click(1466, 968)\n # Page 3\n time.sleep(2)\n pag.click(1093, 742)\n # Page 4\n time.sleep(2)\n pag.click(784, 614)\n pag.typewrite(profile[\"firstName\"])\n # time.sleep(0.5)\n pag.click(775, 712)\n pag.typewrite(profile[\"lastName\"])\n # time.sleep(2)\n pag.click(1189, 848)\n # Page 5\n time.sleep(0.5)\n pag.click(1187, 856)\n # Page 5-2\n time.sleep(0.5)\n pag.click(725, 549)\n pyperclip.copy(profile[\"heading\"])\n pag.hotkey(\"ctrl\", \"v\")\n # time.sleep(2)\n pag.click(795, 677)\n pyperclip.copy(profile[\"description\"])\n pag.hotkey(\"ctrl\", \"v\")\n time.sleep(0.5)\n pag.click(1189, 848)\n # Page 6\n time.sleep(2.5)\n pag.click(739, 667)\n pag.typewrite(profile[\"birth\"])\n time.sleep(0.5)\n pag.click(1185, 611)\n time.sleep(0.5)\n pag.click(1185, 811)\n # Page 7\n if pag.confirm(\"Continue?\") != \"OK\":\n exit()\n time.sleep(0.8)\n pag.click(1189, 924)\n # Page 8\n time.sleep(0.5)\n pag.hotkey(\"ctrl\", \"tab\")\n time.sleep(4)\n pag.click(525, 211)\n time.sleep(3)\n pag.click(653, 454)\n # Emali Verify\n time.sleep(4)\n pag.hotkey(\"ctrl\", \"w\")\n time.sleep(0.5)\n pag.hotkey(\"ctrl\", \"w\")\n time.sleep(0.5)\n pag.click(1185, 666)\n # Page 9\n time.sleep(1)\n pag.click(1432, 894)\n # Page 10\n time.sleep(1)\n pag.click(1452, 1015)\n # Page 11\n time.sleep(5)\n pag.click(816, 544)\n time.sleep(5)\n pag.click(1480, 358) # showcase\n\n pag.alert(\"Done\")\n # while True:\n # print(pag.position())" }, { "identifier": "UpworkBot", "path": "UpworkBot.py", "snippet": "class UpworkBot:\n # Constructor\n def __init__(self):\n pass\n def create(self): \n profile = None\n with open(\"./profile.json\", \"r+\") as file:\n profile = json.load(file)[\"upwork\"]\n print(profile[\"skills\"])\n pag.FAILSAFE = False\n if pag.confirm(\"Are you ready? 
Please go to browser.\") != \"OK\":\n exit()\n pag.hotkey(\"ctrl\", \"t\")\n pag.typewrite(\"upwork.com\\n\")\n time.sleep(1)\n pag.click(1770,53)\n time.sleep(1)\n pag.click(1750,260)\n time.sleep(5)\n pag.click(1285,739)\n time.sleep(1)\n pag.click(1675,110)\n time.sleep(1)\n pag.click(1100,380)\n time.sleep(1)\n pag.click(952,514)\n pag.hotkey(\"ctrl\", \"t\")\n pag.typewrite(\"addy.io\\n\")\n time.sleep(1)\n pag.click(1438,122)\n time.sleep(1)\n pag.click(52,272)\n time.sleep(1)\n pag.click(1674,195)\n time.sleep(1)\n pag.click(891,469)\n time.sleep(1)\n pag.click(833,631)\n time.sleep(1)\n pag.click(773,724)\n time.sleep(0.5)\n pag.typewrite(\"louis\")\n time.sleep(0.5)\n pag.click(830,760)\n time.sleep(1)\n pag.click(787,797)\n time.sleep(1)\n pag.click(648,401)\n time.sleep(0.5)\n pag.hotkey(\"ctrl\", \"w\")\n time.sleep(0.5)\n pag.click(718,513)\n pag.hotkey(\"ctrl\", \"v\")\n pag.click(708,453)\n pag.typewrite(\"Louis\")\n pag.click(1085,450)\n pag.typewrite(\"Winkler\")\n pag.click(830,569)\n pag.typewrite(\"pwd1234!@#$\")\n time.sleep(0.5)\n pag.click(661,690)\n time.sleep(0.5)\n pag.click(665,735)\n time.sleep(0.5)\n pag.click(969,808) \n if pag.confirm(\"Verify email and click Next\") != \"OK\":\n exit()\n pag.click(383,704)\n time.sleep(1)\n pag.click(1393,587)\n time.sleep(1)\n pag.click(1860,1000)\n time.sleep(1)\n pag.click(800,600)\n time.sleep(1)\n pag.click(1860,1000) \n time.sleep(1)\n pag.click(512,541)\n time.sleep(1)\n pag.click(343,745)\n time.sleep(1)\n pag.click(1860,1000)\n time.sleep(1)\n pag.click(572,545)\n time.sleep(1)\n pag.click(412,483)\n pag.typewrite(profile[\"heading\"])\n pag.click(1860,1000)\n time.sleep(1)\n \n #Experience - 1\n pag.click(570,600)\n time.sleep(1)\n pag.click(722,265)\n pag.typewrite(\"web\")\n time.sleep(1)\n pag.click(674,358)\n time.sleep(0.5)\n pag.click(660,362)\n pag.typewrite(profile[\"experience\"][0][\"title\"])\n pag.click(690,457)\n pag.typewrite(profile[\"experience\"][0][\"city\"])\n # if isinstance(profile[\"experience\"][0][\"country\"], str):\n pag.click(1075,458)\n time.sleep(0.5)\n pag.click(1080,520)\n pag.typewrite(profile[\"experience\"][0][\"country\"])#end if\n time.sleep(1)\n pag.click(1039,565)\n time.sleep(0.5)\n pag.click(626,497)\n time.sleep(0.5)\n pag.click(730,584)\n time.sleep(0.5)\n pag.click(680,637)\n time.sleep(0.5)\n pag.click(878,583)\n time.sleep(0.5)\n pag.click(840,690)\n time.sleep(0.5)\n pag.click(705,714)\n pag.typewrite(profile[\"experience\"][0][\"description\"])\n time.sleep(0.5)\n pag.click(1257,937)\n \n #Experience - 2\n time.sleep(0.5)\n pag.click(337,605)\n time.sleep(1)\n pag.click(737,270)\n time.sleep(0.5)\n pag.click(715,357)\n time.sleep(0.5)\n pag.click(697,365)\n pag.typewrite(profile[\"experience\"][1][\"title\"])\n pag.click(668,455)\n pag.typewrite(profile[\"experience\"][1][\"city\"])\n time.sleep(0.5)\n pag.click(684,600)\n time.sleep(0.5)\n pag.click(684,645)\n time.sleep(0.5)\n pag.click(865,603)\n time.sleep(0.5)\n pag.click(845,809)\n time.sleep(0.5)\n pag.click(1024,604)\n time.sleep(0.5)\n pag.click(1012,646)\n time.sleep(0.5)\n pag.click(1189,597)\n time.sleep(0.5)\n pag.click(1183,711)\n time.sleep(0.5)\n pag.click(822,719)\n pag.typewrite(profile[\"experience\"][1][\"description\"])\n time.sleep(0.5)\n pag.click(1264,935)\n time.sleep(1)\n pag.click(1778,1002)#Next Pgae\n time.sleep(1)\n pag.click(544,548)\n time.sleep(1)\n pag.click(772,293)\n pyperclip.copy(profile[\"education\"][\"university\"])\n pag.hotkey(\"ctrl\", \"v\")\n time.sleep(1.5)\n 
pag.click(711,352)\n time.sleep(0.5)\n pag.click(688,388)\n time.sleep(0.5)\n pag.typewrite(profile[\"education\"][\"degree\"])\n pag.click(721,423)\n time.sleep(0.5)\n pag.click(716,480)\n time.sleep(0.5)\n pag.typewrite(profile[\"education\"][\"field\"])\n time.sleep(1)\n pag.click(752,529)\n time.sleep(0.5)\n pag.click(735,571)\n time.sleep(0.5)\n pag.typewrite(profile[\"education\"][\"start\"])\n time.sleep(0.5)\n pag.click(685,685)\n time.sleep(0.5)\n pag.click(1092,576)\n time.sleep(0.5)\n pag.typewrite(profile[\"education\"][\"end\"])\n time.sleep(0.5)\n pag.click(1054,691)\n time.sleep(0.5)\n pag.click(826,716)\n pag.typewrite(profile[\"education\"][\"description\"])\n time.sleep(0.5)\n pag.click(1263,905)\n time.sleep(0.5)\n pag.click(1820,999)\n time.sleep(0.5)\n pag.click(1039,541)\n time.sleep(0.5)\n pag.click(1054,712)\n time.sleep(0.5)\n pag.click(1818,1004)#Next Page\n time.sleep(1)\n pag.click(478,509)\n time.sleep(0.5)\n for i in range(0,profile[\"skills\"].len()):\n pag.typewrite(profile[\"skills\"][i])\n time.sleep(0.5)\n pag.press('down')\n time.sleep(0.5)\n pag.typewrite('\\n')\n time.sleep(0.5)\n pag.click(1800,1003)#Next Page\n time.sleep(0.5)\n pag.click(534,543)\n pyperclip.copy(profile[\"description\"])\n pag.hotkey(\"ctrl\", \"v\")\n time.sleep(0.5)\n pag.click(1793,1001)\n time.sleep(0.5)\n pag.click(394,537)\n time.sleep(0.5)\n pag.click(1800,1000)#Next Page\n time.sleep(0.5)\n pag.click(1509,445)\n time.sleep(0.5)\n pag.click(1800,1000)#Next Page\n time.sleep(0.5)\n pag.click(682,599)\n pag.typewrite(profile[\"address\"])\n time.sleep(0.5)\n pag.click(633,687)\n time.sleep(0.5)\n pag.typewrite(profile[\"city\"])\n time.sleep(1)\n pag.click(637,748)\n time.sleep(0.5)\n pag.click(1310,686)\n pag.typewrite(profile[\"zip\"])\n pag.click(781,775)\n pag.typewrite(profile[\"phone\"])\n time.sleep(0.5)\n pag.click(424,507)\n time.sleep(0.5)\n pag.click(773,477)\n time.sleep(3.5)\n pag.click(897,170,2)#here\n time.sleep(1.5)\n pag.click(1226,830)\n time.sleep(1.5)\n pag.click(1800,1000)#Next Page\n time.sleep(1)\n pag.click(471,408)\n time.sleep(1)\n pag.click(900,550)\n time.sleep(5)\n for j in range(profile[\"portfolio\"].len()):\n pag.click(1582,315)#blank void\n time.sleep(0.5)\n for i in range(25):\n pag.hotkey(\"tab\")\n time.sleep(0.2)\n pag.typewrite(\"\\n\")\n time.sleep(1)\n pag.click(867,318)\n pag.typewrite(profile[\"portfolio\"][j][\"title\"])\n pag.click(791,516)\n pag.typewrite(profile[\"portfolio\"][j][\"date\"])\n time.sleep(0.5)\n pag.click(1296,641)\n time.sleep(2.5)\n pag.click(837,445)\n time.sleep(0.5)\n pag.click(1339,776)\n time.sleep(2)\n pag.click(827,982)\n pag.typewrite(profile[\"portfolio\"][j][\"heading\"])\n pag.click(825,871)\n pag.typewrite(profile[\"portfolio\"][j][\"url\"])\n pag.click(1290,697)\n time.sleep(0.5)\n pag.click(967,731)\n time.sleep(0.5)\n pag.click(865,766)\n time.sleep(0.5)\n pag.click(865,766) #repeat?\n time.sleep(0.5)\n pag.click(872,799)\n time.sleep(0.5)\n pag.click(907,331) #browse click\n time.sleep(2.5)\n pag.click(362,472)\n pag.typewrite(profile[\"portfolio\"][j][\"file\"])\n time.sleep(0.5)\n pag.click(791,508) \n if pag.confirm(\"Continue?\") != \"OK\":#7~20s\n exit()\n pag.hotkey(\"end\")\n time.sleep(0.5)\n pag.click(1333,551)\n time.sleep(2)\n pag.hotkey(\"end\")\n time.sleep(0.5)\n pag.click(1360,548)\n time.sleep(6)\n for i in range(profile[\"certificate\"].len()):\n pag.hotkey(\"end\")\n time.sleep(0.5)\n pag.hotkey(\"pageup\")\n time.sleep(0.5)\n pag.click(839,486)\n time.sleep(2)\n 
pag.hotkey(\"f5\")\n time.sleep(6)\n pag.click(787,489)\n time.sleep(1)\n pag.typewrite(profile[\"certificate\"][i])\n pag.click(788,582)\n time.sleep(0.5)\n pag.click(708,629)\n pag.typewrite(profile[\"certificate\"][i])\n time.sleep(0.5)\n pag.click(1240,728)\n time.sleep(7)\n pag.hotkey(\"end\")\n time.sleep(0.5)\n pag.click(979,554)\n time.sleep(2)\n pag.click(897,409)\n pag.typewrite(profile[\"other_exp\"][\"title\"])\n pag.click(776,524)\n pag.typewrite(profile[\"other_exp\"][\"description\"])\n time.sleep(0.5)\n pag.click(1249,823)\n time.sleep(3)\n \n \n pag.alert(\"Done\")\n # while True:\n # print(pag.position())" } ]
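Both bot snippets above drive the browser purely through hard-coded pyautogui coordinates and read their inputs from a local profile.json with top-level "freelancer" and "upwork" sections. The sketch below reconstructs the shape that file appears to expect from the keys the snippets actually access; every value is a placeholder, and any key not referenced in the snippets is omitted.

# Assumed profile.json layout, inferred from the fields read by FreelancerBot and UpworkBot.
# All values are placeholders; only keys referenced in the snippets are listed.
profile = {
    "freelancer": {
        "skills": ["Python", "Web Scraping"],
        "firstName": "First",
        "lastName": "Last",
        "heading": "Short profile headline",
        "description": "Longer profile description",
        "birth": "01/01/1990",
    },
    "upwork": {
        "skills": ["Python"],
        "heading": "Short profile headline",
        "description": "Longer profile description",
        "experience": [
            {"title": "...", "city": "...", "country": "...", "description": "..."},
            {"title": "...", "city": "...", "description": "..."},
        ],
        "education": {"university": "...", "degree": "...", "field": "...",
                      "start": "...", "end": "...", "description": "..."},
        "address": "...", "city": "...", "zip": "...", "phone": "...",
        "portfolio": [{"title": "...", "date": "...", "heading": "...", "url": "...", "file": "..."}],
        "certificate": ["..."],
        "other_exp": {"title": "...", "description": "..."},
    },
}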
import argparse
from FreelancerBot import FreelancerBot
from UpworkBot import UpworkBot
4,501
parser = argparse.ArgumentParser()
parser.add_argument("-t", "--type", help="Specify whether freelancer or upwork.")
args = parser.parse_args()
account_type = args.type
if not isinstance(account_type, str):
    raise TypeError("Missing/Incorrect account_type")
if account_type == "freelancer":
    bot = FreelancerBot()
    bot.create()
if account_type == "upwork":
parser = argparse.ArgumentParser()
parser.add_argument("-t", "--type", help="Specify whether freelancer or upwork.")
args = parser.parse_args()
account_type = args.type
if not isinstance(account_type, str):
    raise TypeError("Missing/Incorrect account_type")
if account_type == "freelancer":
    bot = FreelancerBot()
    bot.create()
if account_type == "upwork":
bot=UpworkBot()
1
2023-12-05 00:14:06+00:00
8k
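The gold next line for this record constructs the Upwork bot, so the completed dispatcher presumably mirrors the freelancer branch. The bot.create() call below is an assumption by analogy with that branch, not something recorded in this example.

if account_type == "upwork":
    bot = UpworkBot()   # gold next line for this record
    bot.create()        # assumed continuation, mirroring the freelancer branch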
ChatClue/ChatClue
osiris.py
[ { "identifier": "OSHelper", "path": "utils/os/helpers.py", "snippet": "class OSHelper:\n \"\"\"\n Provides utility methods for operating system level operations, particularly file management.\n\n This class includes static methods for performing various file system tasks such as cleaning up orphaned files and retrieving files.\n \"\"\"\n\n @staticmethod\n def find_closest_image(directory, target_time):\n \"\"\"\n Finds the closest image file in a directory based on the target time.\n\n This function searches through all JPG files in the specified directory and \n selects the one whose creation time is closest to, but not earlier than, \n the target time.\n\n Args:\n directory (str): The directory path where the image files are stored.\n target_time (float): The target time (in seconds since epoch) to compare the file creation times against.\n\n Returns:\n str: The path of the closest image file. Returns None if no suitable file is found.\n \"\"\"\n closest_file = None\n closest_time_diff = None\n\n # Iterate over each file in the specified directory\n for filename in os.listdir(directory):\n if filename.lower().endswith(\".jpg\"): # Check if the file is a JPG image\n filepath = os.path.join(directory, filename)\n filetime = os.path.getmtime(filepath) # Get the modification time of the file\n # Check if the file's time is later than the target time and if it's the closest so far\n if filetime > target_time:\n logging.info(f\"File is close: {filepath} - Time: {filetime}\")\n time_diff = filetime - target_time\n if closest_time_diff is None or time_diff < closest_time_diff:\n closest_file = filepath\n closest_time_diff = time_diff\n return closest_file\n\n @staticmethod\n def convert_image_to_base64(filepath):\n \"\"\"\n Converts an image file to a Base64 encoded string.\n\n This function reads the image file from the given filepath, encodes it in Base64,\n and then decodes it to a UTF-8 string, which can be easily used for data transfer \n or embedding in web pages.\n\n Args:\n filepath (str): The path of the image file to be converted.\n\n Returns:\n str: The Base64 encoded string of the image.\n \"\"\"\n with open(filepath, \"rb\") as image_file:\n # Read the file and encode it in Base64\n return base64.b64encode(image_file.read()).decode(\"utf-8\")\n\n @staticmethod\n def clear_orphaned_audio_files():\n \"\"\"\n Removes all audio files in a specific directory.\n\n This method is used to clear out any leftover audio files in the 'tmp/audio' directory. \n It iterates through all files in the specified directory and deletes them.\n \"\"\"\n # Specify the directory path for audio files\n directory_path = 'tmp/audio'\n\n # Iterate through and remove each file in the directory\n for filename in os.listdir(directory_path):\n file_path = os.path.join(directory_path, filename)\n try:\n os.remove(file_path)\n logging.info(f\"Removed file: {file_path}\")\n except OSError as e:\n logging.info(f\"Error removing file {file_path}: {e}\")\n \n @staticmethod\n def clear_orphaned_video_files():\n \"\"\"\n Removes all video files in a specific directory.\n\n This method is used to clear out any leftover video files in the 'tmp/video' directory. 
\n It iterates through all files in the specified directory and deletes them.\n \"\"\"\n # Specify the directory path for video files\n directory_path = 'tmp/video'\n\n # Iterate through and remove each file in the directory\n for filename in os.listdir(directory_path):\n file_path = os.path.join(directory_path, filename)\n try:\n os.remove(file_path)\n logging.info(f\"Removed file: {file_path}\")\n except OSError as e:\n logging.info(f\"Error removing file {file_path}: {e}\")\n\n @staticmethod\n def system_file_cleanup():\n \"\"\"\n Performs a general cleanup of system files.\n\n Currently, this method focuses on clearing orphaned audio files but can be expanded to include other cleanup tasks.\n \"\"\"\n # Clear orphaned audio files\n OSHelper.clear_orphaned_audio_files()\n OSHelper.clear_orphaned_video_files()\n \n @staticmethod\n def configure_tmp_directories():\n \"\"\"\n Ensures that the required directories (tmp/audio and tmp/video) exist.\n Creates them if they do not exist.\n \"\"\"\n directories = ['tmp/audio', 'tmp/video']\n for directory in directories:\n os.makedirs(directory, exist_ok=True)\n logging.info(f\"Checked and ensured directory exists: {directory}\")" }, { "identifier": "get_celery_app", "path": "celery_config.py", "snippet": "def get_celery_app():\n return celery_app" }, { "identifier": "DatabaseSetup", "path": "database/setup.py", "snippet": "class DatabaseSetup:\n \"\"\"\n This class is responsible for database setup tasks, particularly\n for ensuring that all defined tables in SQLAlchemy models are created in the database.\n \"\"\"\n\n @staticmethod\n def initial_setup():\n \"\"\"\n Creates tables in the database based on the SQLAlchemy models.\n\n This method uses the SQLAlchemy engine to connect to the database and creates\n any tables that haven't been created yet as defined in the SQLAlchemy model classes.\n It's intended to be run during the initial setup phase of the application.\n \"\"\"\n\n # Obtain the SQLAlchemy engine\n engine = get_engine()\n\n # Ensure vector extension is enabled.\n with engine.begin() as connection:\n # Create extension 'pgvector' if it is not created yet\n # Remember, you may need to install pgvector on your system before this will work properly.\n # https://github.com/pgvector/pgvector.git for instructions.\n connection.execute(text(\"CREATE EXTENSION IF NOT EXISTS vector\"))\n\n # Create all tables in the database defined in the SQLAlchemy models\n # This will have no effect on existing tables that match the model definitions\n Base.metadata.create_all(engine)" }, { "identifier": "broadcaster", "path": "broadcast/broadcaster.py", "snippet": "class Broadcaster:\n def __init__(self):\n def send_message(self, message):\n def start(self):\n def shutdown(self):" }, { "identifier": "AudioProcessor", "path": "audio/audio_processor.py", "snippet": "class AudioProcessor:\n \"\"\"\n A class to handle audio processing, including capturing audio input, \n processing it with Vosk for speech recognition, and responding using OpenAI's GPT model.\n\n Attributes:\n model (Vosk.Model): Vosk speech recognition model.\n samplerate (int): The sample rate for audio capture.\n device (str): The name of the audio input device.\n blocksize (int): The block size for audio processing.\n dump_filename (str): Filename to dump the audio input, if provided.\n \"\"\"\n\n def __init__(self):\n self.model = Model(lang=AUDIO_SETTINGS.get('VOSK_MODEL', \"en-us\"))\n self.samplerate = AUDIO_SETTINGS.get('SOUND_DEVICE_SAMPLERATE')\n self.device = 
AUDIO_SETTINGS.get('SOUND_DEVICE_DEVICE')\n self.blocksize = AUDIO_SETTINGS.get('SOUND_DEVICE_BLOCK_SIZE', 28000)\n self.dump_filename = AUDIO_SETTINGS.get('AUDIO_IN_DUMP_FILENAME')\n self.audio_queue = queue.Queue()\n self.openai_client = OpenAIClient()\n self.openai_conversation_builder = OpenAIConversationBuilder()\n self.tool_processor = ToolProcessor()\n self.broadcaster = broadcaster\n self.audio_out = get_audio_out()\n self.audio_out_response_buffer = ''\n self.full_assistant_response = ''\n self.last_wake_time = 0\n self.last_response_end_time = 0\n self.processing_openai_request = False\n self.shutdown_event = threading.Event()\n\n def open_dump_file(self):\n \"\"\"Opens the file to dump audio input if a filename is provided.\"\"\"\n if self.dump_filename is not None:\n self.dump_filename = open(self.dump_filename, \"wb\")\n\n def close_dump_file(self):\n \"\"\"Closes the audio dump file if it was opened.\"\"\"\n if self.dump_filename is not None:\n self.dump_filename.close()\n\n def should_process(self, result, current_time):\n \"\"\"\n Determines whether the robot should process the input based on wake phrases or elapsed time.\n\n Args:\n result (str): The recognized text from the audio input.\n current_time (float): The current time in seconds.\n\n Returns:\n bool: True if the input should be processed, False otherwise.\n \"\"\"\n return (not contains_quiet_please_phrase(result) and contains_wake_phrase(result)) or \\\n (not contains_quiet_please_phrase(result) and (current_time - self.last_wake_time <= 10) or (current_time - self.last_response_end_time <= 10) and not self.audio_out.is_playing) \\\n\n def update_wake_time(self):\n \"\"\"Updates the time when a wake phrase was last heard.\"\"\"\n self.last_wake_time = time.time()\n self.save_system_state()\n\n def update_response_end_time(self):\n \"\"\"Updates the time when the robot's last response ended.\"\"\"\n self.last_response_end_time = time.time()\n\n def callback(self, indata, frames, time, status):\n \"\"\"\n Callback function for audio input stream.\n\n Args:\n indata: The buffer containing the incoming sound.\n frames: The number of frames.\n time: Current stream time.\n status: Status of the stream.\n \"\"\"\n if status:\n logging.warning(status)\n self.audio_queue.put(bytes(indata))\n\n def process_stream(self):\n \"\"\"\n Processes the audio stream by recognizing speech and generating responses.\n\n Continuously captures audio, performs speech recognition, and generates responses using OpenAI.\n \"\"\"\n self.open_dump_file()\n try:\n with sd.RawInputStream(samplerate=self.samplerate, blocksize=self.blocksize, device=self.device,\n dtype=\"int16\", channels=1, callback=self.callback):\n rec = KaldiRecognizer(self.model, self.samplerate)\n openai_stream_thread = None\n\n while not self.shutdown_event.is_set():\n data, current_time = self.get_audio_data()\n result = self.process_recognition(data, rec)\n\n if result:\n openai_stream_thread = self.handle_speech(result, openai_stream_thread, current_time)\n\n self.handle_partial_results(rec)\n self.write_to_dump_file(data)\n self.process_openai_response()\n\n # except Exception as e:\n # logging.error(f\"An error occurred: {e}\")\n finally:\n self.close_dump_file()\n\n def get_audio_data(self):\n \"\"\"\n Retrieves audio data from the queue.\n\n Returns:\n tuple: A tuple containing the audio data and the current time.\n \"\"\"\n data = self.audio_queue.get()\n current_time = time.time()\n return data, current_time\n\n def process_recognition(self, data, rec):\n 
\"\"\"\n Processes the recognition of speech from audio data.\n\n Args:\n data: The audio data to be processed.\n rec (KaldiRecognizer): The Vosk recognizer instance.\n\n Returns:\n str or None: Recognized text or None if no significant speech is recognized.\n \"\"\"\n if rec.AcceptWaveform(data):\n result = json.loads(rec.Result())[\"text\"]\n if result not in ['', 'huh']:\n self.broadcaster.send_message(result)\n logging.info(\"ROBOT HEARD: \" + result)\n return result\n return None\n\n def handle_speech(self, result, openai_stream_thread, current_time):\n \"\"\"\n Processes the recognized speech and determines the appropriate response.\n\n Args:\n result (str): Recognized speech text.\n openai_stream_thread (threading.Thread): The current OpenAI stream thread.\n current_time (float): Current time in seconds.\n\n Returns:\n threading.Thread: Updated or new OpenAI stream thread.\n \"\"\"\n try:\n if self.should_process(result, current_time) and not self.processing_openai_request:\n self.update_wake_time()\n self.processing_openai_request = True\n if not openai_stream_thread or not openai_stream_thread.is_alive():\n self.openai_client.stop_signal.clear()\n is_tool_request, conversation = self.determine_tool_request(result)\n if is_tool_request:\n self.handle_tool_request(result, conversation)\n else:\n self.continue_conversation(result, conversation)\n else:\n logging.info(\"ROBOT THOUGHT: Ignoring Conversation, it doesn't appear to be relevant.\")\n finally:\n self.processing_openai_request = False\n return openai_stream_thread\n \n \n def determine_tool_request(self, result):\n \"\"\"\n Determines whether the given input text is a tool request.\n\n Args:\n result (str): The recognized text to evaluate.\n\n Returns:\n Tuple[bool, list]: A tuple containing a boolean indicating whether it's a tool request, \n and the conversation array for further processing.\n \"\"\"\n call_type_messages = self.openai_conversation_builder.create_check_if_tool_call_messages(result)\n openai_is_tool_response = self.openai_client.create_completion(call_type_messages, False, {\"type\": \"json_object\"}, openai_functions, True)\n \n is_tool_request = False\n conversation = self.openai_conversation_builder.create_recent_conversation_messages_array(result)\n\n try:\n if openai_is_tool_response and openai_is_tool_response.choices:\n is_tool_request = json.loads(openai_is_tool_response.choices[0].message.content).get(\"is_tool\", False)\n except (TypeError, AttributeError, json.JSONDecodeError):\n print(\"Error parsing OpenAI response or response not in expected format.\")\n\n return is_tool_request, conversation\n\n def handle_tool_request(self, result, conversation):\n \"\"\"\n Handles the processing of a tool request.\n\n Args:\n result (str): The recognized text.\n conversation (list): The conversation array built up to this point.\n \"\"\"\n tool_response = self.openai_client.create_completion(conversation, False, None, openai_functions)\n tool_response_message = tool_response.choices[0].message \n tool_calls = tool_response_message.tool_calls \n if tool_calls:\n self.process_tool_calls(tool_calls, result, conversation, tool_response_message)\n else:\n self.continue_conversation(result, conversation)\n\n def process_tool_calls(self, tool_calls, result, conversation, tool_response_message):\n \"\"\"\n Processes the tool calls received from OpenAI.\n\n Args:\n tool_calls (list): List of tool calls from OpenAI response.\n result (str): The recognized text.\n conversation (list): The conversation array.\n 
tool_response_message (Message): The tool response message from OpenAI.\n \"\"\"\n tool_call = tool_calls[0]\n tool_processor_response = self.tool_processor.process_tool_request(tool_call)\n if tool_processor_response[\"success\"]:\n self.handle_successful_tool_response(tool_processor_response, result, conversation, tool_response_message)\n else:\n self.audio_out.add_to_queue(get_tool_not_found_phrase())\n\n def handle_successful_tool_response(self, tool_processor_response, result, conversation, tool_response_message):\n \"\"\"\n Handles a successful tool response.\n\n Args:\n tool_processor_response (dict): The response from the tool processor.\n result (str): The recognized text.\n conversation (list): The conversation array.\n tool_response_message (Message): The tool response message from OpenAI.\n \"\"\"\n if tool_processor_response[\"is_conversational\"]:\n conversation.append(tool_response_message)\n tool_call_response_message = self.openai_conversation_builder.create_tool_call_response_message(tool_processor_response)\n conversation.append(tool_call_response_message)\n openai_stream_thread = threading.Thread(target=self.openai_client.stream_response, args=(conversation,))\n openai_stream_thread.start()\n else:\n self.store_conversation(speaker_type=CONVERSATIONS_CONFIG[\"user\"], response=result)\n\n def continue_conversation(self, result, conversation):\n \"\"\"\n Continues the conversation with OpenAI based on the given result.\n\n Args:\n result (str): The recognized text to continue the conversation with.\n conversation (list): The existing conversation array.\n \"\"\"\n self.openai_client.stop_processing_request()\n conversation = self.openai_conversation_builder.create_recent_conversation_messages_array(result)\n openai_stream_thread = threading.Thread(target=self.openai_client.stream_response, args=(conversation,))\n openai_stream_thread.start()\n logging.info(\"ROBOT ACTION: Committing user input to memory.\")\n self.store_conversation(speaker_type=CONVERSATIONS_CONFIG[\"user\"], response=result)\n\n\n def handle_partial_results(self, rec):\n \"\"\"\n Handles partial results from speech recognition.\n\n Args:\n rec (KaldiRecognizer): The Vosk recognizer instance.\n \"\"\"\n partial_result_json = json.loads(rec.PartialResult())\n if 'partial' in partial_result_json and contains_quiet_please_phrase(partial_result_json['partial']):\n self.stop_conversation_and_audio()\n\n def stop_conversation_and_audio(self):\n \"\"\"\n Stops the conversation and any ongoing audio processing.\n \"\"\"\n logging.info(\"ROBOT THOUGHT: Request to stop talking recognized. 
Stopping stream.\")\n self.stop_all_audio()\n if self.full_assistant_response:\n logging.info(\"ROBOT ACTION: Committing my partial response to memory\")\n self.store_full_assistant_response()\n\n def stop_all_audio(self):\n self.audio_out_response_buffer = ''\n self.openai_client.stop_processing_request()\n self.audio_out.stop_all_audio()\n\n def write_to_dump_file(self, data):\n \"\"\"\n Writes audio data to the dump file if it's open.\n\n Args:\n data: The audio data to be written to the file.\n \"\"\"\n if self.dump_filename is not None:\n self.dump_filename.write(data)\n\n def process_openai_response(self):\n \"\"\"\n Processes responses from OpenAI's GPT model.\n\n Retrieves and handles the responses generated by OpenAI.\n \"\"\"\n while not self.openai_client.response_queue.empty():\n chunk = self.openai_client.response_queue.get()\n if chunk.choices[0].delta.content is not None:\n response_text = chunk.choices[0].delta.content\n print(response_text, end='', flush=True)\n self.update_response_end_time()\n self.audio_out_response_buffer += response_text\n if self.audio_out_response_buffer.endswith(('.', '?', '!', ';')):\n self.audio_out.add_to_queue(self.audio_out_response_buffer)\n self.audio_out_response_buffer = \"\"\n self.full_assistant_response += response_text\n\n if self.full_assistant_response and self.openai_client.streaming_complete:\n logging.info(\"ROBOT ACTION: Committing my full response to memory\")\n self.store_full_assistant_response()\n\n def store_full_assistant_response(self):\n \"\"\"\n Stores the full assistant response in the database.\n \"\"\"\n self.store_conversation(speaker_type=CONVERSATIONS_CONFIG[\"assistant\"], response=self.full_assistant_response)\n self.full_assistant_response = ''\n\n def store_conversation(self, speaker_type, response):\n \"\"\"\n Stores the conversation part in the database asynchronously using a Celery task.\n\n Args:\n speakerType (str): \"user\" or \"assistant\", indicating who is speaking.\n response (str): The text of the response.\n \"\"\"\n get_celery_app().send_task('background.memory.tasks.store_conversation_task', args=[speaker_type, response])\n logging.info(\"Store conversation task submitted to background\")\n \n def save_system_state(self):\n \"\"\"\n Saves the system state in the database asynchronously using a Celery task.\n \"\"\"\n get_celery_app().send_task('background.memory.tasks.update_system_state_task', args=[self.last_wake_time])\n logging.info(\"Update system state task submitted to background\")\n\n def shutdown(self):\n self.shutdown_event.set()" }, { "identifier": "VideoProcessor", "path": "video/video_processor.py", "snippet": "class VideoProcessor:\n \"\"\"\n A class to handle video processing, including capturing video input and \n processing it with MediaPipe for pose estimation.\n \"\"\"\n\n def __init__(self):\n # MediaPipe Pose solution initialization\n self.mp_pose = mp.solutions.pose\n self.pose = self.mp_pose.Pose()\n self.cap = None\n\n # Video capture settings\n self.frame_rate = VIDEO_SETTINGS.get('FRAME_RATE', 30)\n self.device = VIDEO_SETTINGS.get('VIDEO_DEVICE', 0)\n self.capture_interval = VIDEO_SETTINGS.get('CAPTURE_INTERVAL', 1)\n self.frame_counter = 0\n self.last_capture_time = time.time()\n self.frame_queue = queue.Queue()\n\n # Check and create tmp directory for storing frames\n self.tmp_folder = 'tmp/video'\n if not os.path.exists(self.tmp_folder):\n os.makedirs(self.tmp_folder)\n\n self.shutdown_event = threading.Event()\n\n def process_stream(self):\n \"\"\"\n Captures and 
processes the video stream.\n \"\"\"\n if VIDEO_SETTINGS.get('CAPTURE_VIDEO', False):\n self.cap = cv2.VideoCapture(self.device)\n\n while not self.shutdown_event.is_set():\n ret, frame = self.cap.read()\n if not ret:\n continue\n\n # Process the frame\n #self.process_frame(frame)\n\n # Capture frames at a set interval for saving\n if time.time() - self.last_capture_time > self.capture_interval:\n frame_name = os.path.join(self.tmp_folder, f\"frame_{self.frame_counter}.jpg\")\n cv2.imwrite(frame_name, frame)\n logging.debug(f\"Frame saved as {frame_name}\")\n self.frame_counter += 1\n self.last_capture_time = time.time()\n\n self.clean_up()\n \n def clean_up(self):\n \"\"\"\n Releases resources and closes windows.\n \"\"\"\n if self.cap:\n self.cap.release()\n cv2.destroyAllWindows()\n OSHelper.clear_orphaned_video_files()\n\n def process_frame(self, frame):\n \"\"\"\n Processes a single video frame.\n \"\"\"\n self.frame_queue.put(frame)\n frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n results = self.pose.process(frame_rgb)\n\n if results.pose_landmarks:\n # Draw pose landmarks\n mp.solutions.drawing_utils.draw_landmarks(frame, results.pose_landmarks, self.mp_pose.POSE_CONNECTIONS)\n # Additional processing can be added here\n \n def shutdown(self):\n \"\"\"\n Signals the thread to terminate.\n \"\"\"\n self.shutdown_event.set()" }, { "identifier": "get_audio_out", "path": "audio/audio_out.py", "snippet": "def get_audio_out():\n \"\"\"\n Returns the instance of AudioOutput for use.\n\n Returns:\n AudioOutput: The instance of the AudioOutput class.\n \"\"\"\n return audio_out" }, { "identifier": "OSHelper", "path": "utils/os/helpers.py", "snippet": "class OSHelper:\n \"\"\"\n Provides utility methods for operating system level operations, particularly file management.\n\n This class includes static methods for performing various file system tasks such as cleaning up orphaned files and retrieving files.\n \"\"\"\n\n @staticmethod\n def find_closest_image(directory, target_time):\n \"\"\"\n Finds the closest image file in a directory based on the target time.\n\n This function searches through all JPG files in the specified directory and \n selects the one whose creation time is closest to, but not earlier than, \n the target time.\n\n Args:\n directory (str): The directory path where the image files are stored.\n target_time (float): The target time (in seconds since epoch) to compare the file creation times against.\n\n Returns:\n str: The path of the closest image file. 
Returns None if no suitable file is found.\n \"\"\"\n closest_file = None\n closest_time_diff = None\n\n # Iterate over each file in the specified directory\n for filename in os.listdir(directory):\n if filename.lower().endswith(\".jpg\"): # Check if the file is a JPG image\n filepath = os.path.join(directory, filename)\n filetime = os.path.getmtime(filepath) # Get the modification time of the file\n # Check if the file's time is later than the target time and if it's the closest so far\n if filetime > target_time:\n logging.info(f\"File is close: {filepath} - Time: {filetime}\")\n time_diff = filetime - target_time\n if closest_time_diff is None or time_diff < closest_time_diff:\n closest_file = filepath\n closest_time_diff = time_diff\n return closest_file\n\n @staticmethod\n def convert_image_to_base64(filepath):\n \"\"\"\n Converts an image file to a Base64 encoded string.\n\n This function reads the image file from the given filepath, encodes it in Base64,\n and then decodes it to a UTF-8 string, which can be easily used for data transfer \n or embedding in web pages.\n\n Args:\n filepath (str): The path of the image file to be converted.\n\n Returns:\n str: The Base64 encoded string of the image.\n \"\"\"\n with open(filepath, \"rb\") as image_file:\n # Read the file and encode it in Base64\n return base64.b64encode(image_file.read()).decode(\"utf-8\")\n\n @staticmethod\n def clear_orphaned_audio_files():\n \"\"\"\n Removes all audio files in a specific directory.\n\n This method is used to clear out any leftover audio files in the 'tmp/audio' directory. \n It iterates through all files in the specified directory and deletes them.\n \"\"\"\n # Specify the directory path for audio files\n directory_path = 'tmp/audio'\n\n # Iterate through and remove each file in the directory\n for filename in os.listdir(directory_path):\n file_path = os.path.join(directory_path, filename)\n try:\n os.remove(file_path)\n logging.info(f\"Removed file: {file_path}\")\n except OSError as e:\n logging.info(f\"Error removing file {file_path}: {e}\")\n \n @staticmethod\n def clear_orphaned_video_files():\n \"\"\"\n Removes all video files in a specific directory.\n\n This method is used to clear out any leftover video files in the 'tmp/video' directory. 
\n It iterates through all files in the specified directory and deletes them.\n \"\"\"\n # Specify the directory path for video files\n directory_path = 'tmp/video'\n\n # Iterate through and remove each file in the directory\n for filename in os.listdir(directory_path):\n file_path = os.path.join(directory_path, filename)\n try:\n os.remove(file_path)\n logging.info(f\"Removed file: {file_path}\")\n except OSError as e:\n logging.info(f\"Error removing file {file_path}: {e}\")\n\n @staticmethod\n def system_file_cleanup():\n \"\"\"\n Performs a general cleanup of system files.\n\n Currently, this method focuses on clearing orphaned audio files but can be expanded to include other cleanup tasks.\n \"\"\"\n # Clear orphaned audio files\n OSHelper.clear_orphaned_audio_files()\n OSHelper.clear_orphaned_video_files()\n \n @staticmethod\n def configure_tmp_directories():\n \"\"\"\n Ensures that the required directories (tmp/audio and tmp/video) exist.\n Creates them if they do not exist.\n \"\"\"\n directories = ['tmp/audio', 'tmp/video']\n for directory in directories:\n os.makedirs(directory, exist_ok=True)\n logging.info(f\"Checked and ensured directory exists: {directory}\")" }, { "identifier": "welcome_message", "path": "utils/text/welcome.py", "snippet": "def welcome_message():\n print(\"\"\"\n ChatClue: Osiris\n \n /\\_/\\ \n ( o.o ) \n > ^ <\n \n Optimized System for Integrated Real-Time Interaction and Sensing\n \"\"\")" }, { "identifier": "ColorFormatter", "path": "utils/logging/colors.py", "snippet": "class ColorFormatter(logging.Formatter):\n def format(self, record):\n levelname = record.levelname\n message = logging.Formatter.format(self, record)\n return COLORS.get(levelname, '') + message + COLORS['ENDC']" } ]
from config import CELERY_CONFIG, LOG_LEVEL, VIDEO_SETTINGS from utils.os.helpers import OSHelper from celery import Celery from celery_config import get_celery_app from database.setup import DatabaseSetup from broadcast.broadcaster import broadcaster from audio.audio_processor import AudioProcessor from video.video_processor import VideoProcessor from audio.audio_out import get_audio_out from utils.os.helpers import OSHelper from utils.text.welcome import welcome_message from utils.logging.colors import ColorFormatter from background.memory.tasks import * from tools import * # Import all openai tool functions import logging import subprocess import atexit import sys import threading import time import cv2 import queue
6,852
# Configure basic logging for the application logging.basicConfig(level=LOG_LEVEL) root_logger = logging.getLogger() for handler in root_logger.handlers: handler.setFormatter(ColorFormatter('%(asctime)s - %(levelname)s - %(message)s')) # Ensure the necessary tmp/ directories exist OSHelper.configure_tmp_directories() # Configure background processor / subconscious systems
# Configure basic logging for the application logging.basicConfig(level=LOG_LEVEL) root_logger = logging.getLogger() for handler in root_logger.handlers: handler.setFormatter(ColorFormatter('%(asctime)s - %(levelname)s - %(message)s')) # Ensure the necessary tmp/ directories exist OSHelper.configure_tmp_directories() # Configure background processor / subconscious systems
celery_app = get_celery_app()
1
2023-12-06 09:10:06+00:00
8k
GXNU-ZhongLab/ODTrack
lib/test/tracker/odtrack.py
[ { "identifier": "build_odtrack", "path": "lib/models/odtrack/odtrack.py", "snippet": "def build_odtrack(cfg, training=True):\r\n current_dir = os.path.dirname(os.path.abspath(__file__)) # This is your Project Root\r\n pretrained_path = os.path.join(current_dir, '../../../pretrained_networks')\r\n if cfg.MODEL.PRETRAIN_FILE and ('OSTrack' not in cfg.MODEL.PRETRAIN_FILE) and training:\r\n pretrained = os.path.join(pretrained_path, cfg.MODEL.PRETRAIN_FILE)\r\n else:\r\n pretrained = ''\r\n\r\n if cfg.MODEL.BACKBONE.TYPE == 'vit_base_patch16_224':\r\n backbone = vit_base_patch16_224(pretrained, drop_path_rate=cfg.TRAIN.DROP_PATH_RATE,\r\n add_cls_token=cfg.MODEL.BACKBONE.ADD_CLS_TOKEN,\r\n attn_type=cfg.MODEL.BACKBONE.ATTN_TYPE,)\r\n\r\n elif cfg.MODEL.BACKBONE.TYPE == 'vit_large_patch16_224':\r\n backbone = vit_large_patch16_224(pretrained, drop_path_rate=cfg.TRAIN.DROP_PATH_RATE, \r\n add_cls_token=cfg.MODEL.BACKBONE.ADD_CLS_TOKEN,\r\n attn_type=cfg.MODEL.BACKBONE.ATTN_TYPE, \r\n )\r\n \r\n elif cfg.MODEL.BACKBONE.TYPE == 'vit_base_patch16_224_ce':\r\n backbone = vit_base_patch16_224_ce(pretrained, drop_path_rate=cfg.TRAIN.DROP_PATH_RATE,\r\n ce_loc=cfg.MODEL.BACKBONE.CE_LOC,\r\n ce_keep_ratio=cfg.MODEL.BACKBONE.CE_KEEP_RATIO,\r\n add_cls_token=cfg.MODEL.BACKBONE.ADD_CLS_TOKEN,\r\n )\r\n\r\n elif cfg.MODEL.BACKBONE.TYPE == 'vit_large_patch16_224_ce':\r\n backbone = vit_large_patch16_224_ce(pretrained, drop_path_rate=cfg.TRAIN.DROP_PATH_RATE,\r\n ce_loc=cfg.MODEL.BACKBONE.CE_LOC,\r\n ce_keep_ratio=cfg.MODEL.BACKBONE.CE_KEEP_RATIO,\r\n add_cls_token=cfg.MODEL.BACKBONE.ADD_CLS_TOKEN,\r\n )\r\n\r\n else:\r\n raise NotImplementedError\r\n hidden_dim = backbone.embed_dim\r\n patch_start_index = 1\r\n \r\n backbone.finetune_track(cfg=cfg, patch_start_index=patch_start_index)\r\n\r\n box_head = build_box_head(cfg, hidden_dim)\r\n\r\n model = ODTrack(\r\n backbone,\r\n box_head,\r\n aux_loss=False,\r\n head_type=cfg.MODEL.HEAD.TYPE,\r\n token_len=cfg.MODEL.BACKBONE.TOKEN_LEN,\r\n )\r\n\r\n return model\r" }, { "identifier": "BaseTracker", "path": "lib/test/tracker/basetracker.py", "snippet": "class BaseTracker:\n \"\"\"Base class for all trackers.\"\"\"\n\n def __init__(self, params):\n self.params = params\n self.visdom = None\n\n def predicts_segmentation_mask(self):\n return False\n\n def initialize(self, image, info: dict) -> dict:\n \"\"\"Overload this function in your tracker. This should initialize the model.\"\"\"\n raise NotImplementedError\n\n def track(self, image, info: dict = None) -> dict:\n \"\"\"Overload this function in your tracker. 
This should track in the frame and update the model.\"\"\"\n raise NotImplementedError\n\n def visdom_draw_tracking(self, image, box, segmentation=None):\n if isinstance(box, OrderedDict):\n box = [v for k, v in box.items()]\n else:\n box = (box,)\n if segmentation is None:\n self.visdom.register((image, *box), 'Tracking', 1, 'Tracking')\n else:\n self.visdom.register((image, *box, segmentation), 'Tracking', 1, 'Tracking')\n\n def transform_bbox_to_crop(self, box_in, resize_factor, device, box_extract=None, crop_type='template'):\n # box_in: list [x1, y1, w, h], not normalized\n # box_extract: same as box_in\n # out bbox: Torch.tensor [1, 1, 4], x1y1wh, normalized\n if crop_type == 'template':\n crop_sz = torch.Tensor([self.params.template_size, self.params.template_size])\n elif crop_type == 'search':\n crop_sz = torch.Tensor([self.params.search_size, self.params.search_size])\n else:\n raise NotImplementedError\n\n box_in = torch.tensor(box_in)\n if box_extract is None:\n box_extract = box_in\n else:\n box_extract = torch.tensor(box_extract)\n template_bbox = transform_image_to_crop(box_in, box_extract, resize_factor, crop_sz, normalize=True)\n template_bbox = template_bbox.view(1, 1, 4).to(device)\n\n return template_bbox\n\n def _init_visdom(self, visdom_info, debug):\n visdom_info = {} if visdom_info is None else visdom_info\n self.pause_mode = False\n self.step = False\n self.next_seq = False\n if debug > 0 and visdom_info.get('use_visdom', True):\n try:\n self.visdom = Visdom(debug, {'handler': self._visdom_ui_handler, 'win_id': 'Tracking'},\n visdom_info=visdom_info)\n\n # # Show help\n # help_text = 'You can pause/unpause the tracker by pressing ''space'' with the ''Tracking'' window ' \\\n # 'selected. During paused mode, you can track for one frame by pressing the right arrow key.' \\\n # 'To enable/disable plotting of a data block, tick/untick the corresponding entry in ' \\\n # 'block list.'\n # self.visdom.register(help_text, 'text', 1, 'Help')\n except:\n time.sleep(0.5)\n print('!!! WARNING: Visdom could not start, so using matplotlib visualization instead !!!\\n'\n '!!! 
Start Visdom in a separate terminal window by typing \\'visdom\\' !!!')\n\n def _visdom_ui_handler(self, data):\n if data['event_type'] == 'KeyPress':\n if data['key'] == ' ':\n self.pause_mode = not self.pause_mode\n\n elif data['key'] == 'ArrowRight' and self.pause_mode:\n self.step = True\n\n elif data['key'] == 'n':\n self.next_seq = True" }, { "identifier": "gen_visualization", "path": "lib/test/tracker/vis_utils.py", "snippet": "def gen_visualization(image, mask_indices, patch_size=16):\r\n # image [224, 224, 3]\r\n # mask_indices, list of masked token indices\r\n\r\n # mask mask_indices need to cat\r\n # mask_indices = mask_indices[::-1]\r\n num_stages = len(mask_indices)\r\n for i in range(1, num_stages):\r\n mask_indices[i] = np.concatenate([mask_indices[i-1], mask_indices[i]], axis=1)\r\n\r\n # keep_indices = get_keep_indices(decisions)\r\n image = np.asarray(image)\r\n H, W, C = image.shape\r\n Hp, Wp = H // patch_size, W // patch_size\r\n image_tokens = image.reshape(Hp, patch_size, Wp, patch_size, 3).swapaxes(1, 2).reshape(Hp * Wp, patch_size, patch_size, 3)\r\n\r\n stages = [\r\n recover_image(gen_masked_tokens(image_tokens, mask_indices[i]), H, W, Hp, Wp, patch_size)\r\n for i in range(num_stages)\r\n ]\r\n imgs = [image] + stages\r\n imgs = [pad_img(img) for img in imgs]\r\n viz = np.concatenate(imgs, axis=1)\r\n return viz\r" }, { "identifier": "hann2d", "path": "lib/test/utils/hann.py", "snippet": "def hann2d(sz: torch.Tensor, centered = True) -> torch.Tensor:\n \"\"\"2D cosine window.\"\"\"\n return hann1d(sz[0].item(), centered).reshape(1, 1, -1, 1) * hann1d(sz[1].item(), centered).reshape(1, 1, 1, -1)" }, { "identifier": "sample_target", "path": "lib/train/data/processing_utils.py", "snippet": "def sample_target(im, target_bb, search_area_factor, output_sz=None, mask=None):\n \"\"\" Extracts a square crop centered at target_bb box, of area search_area_factor^2 times target_bb area\n\n args:\n im - cv image\n target_bb - target box [x, y, w, h]\n search_area_factor - Ratio of crop size to target size\n output_sz - (float) Size to which the extracted crop is resized (always square). 
If None, no resizing is done.\n\n returns:\n cv image - extracted crop\n float - the factor by which the crop has been resized to make the crop size equal output_size\n \"\"\"\n if not isinstance(target_bb, list):\n x, y, w, h = target_bb.tolist()\n else:\n x, y, w, h = target_bb\n # Crop image\n crop_sz = math.ceil(math.sqrt(w * h) * search_area_factor)\n\n if crop_sz < 1:\n raise Exception('Too small bounding box.')\n # x1, y1, x2, y2 of crop image\n x1 = round(x + 0.5 * w - crop_sz * 0.5)\n x2 = x1 + crop_sz\n\n y1 = round(y + 0.5 * h - crop_sz * 0.5)\n y2 = y1 + crop_sz\n \n x1_pad = max(0, -x1)\n x2_pad = max(x2 - im.shape[1] + 1, 0)\n\n y1_pad = max(0, -y1)\n y2_pad = max(y2 - im.shape[0] + 1, 0)\n\n # Crop target\n im_crop = im[y1 + y1_pad:y2 - y2_pad, x1 + x1_pad:x2 - x2_pad, :]\n if mask is not None:\n mask_crop = mask[y1 + y1_pad:y2 - y2_pad, x1 + x1_pad:x2 - x2_pad]\n\n # Pad\n im_crop_padded = cv.copyMakeBorder(im_crop, y1_pad, y2_pad, x1_pad, x2_pad, cv.BORDER_CONSTANT)\n # deal with attention mask\n H, W, _ = im_crop_padded.shape\n att_mask = np.ones((H,W))\n end_x, end_y = -x2_pad, -y2_pad\n if y2_pad == 0:\n end_y = None\n if x2_pad == 0:\n end_x = None\n att_mask[y1_pad:end_y, x1_pad:end_x] = 0 # mask is 0 for non-padding areas (image content)\n if mask is not None:\n mask_crop_padded = F.pad(mask_crop, pad=(x1_pad, x2_pad, y1_pad, y2_pad), mode='constant', value=0)\n\n if output_sz is not None:\n resize_factor = output_sz / crop_sz\n im_crop_padded = cv.resize(im_crop_padded, (output_sz, output_sz))\n att_mask = cv.resize(att_mask, (output_sz, output_sz)).astype(np.bool_)\n if mask is None:\n return im_crop_padded, resize_factor, att_mask\n mask_crop_padded = \\\n F.interpolate(mask_crop_padded[None, None], (output_sz, output_sz), mode='bilinear', align_corners=False)[0, 0]\n return im_crop_padded, resize_factor, att_mask, mask_crop_padded\n\n else:\n if mask is None:\n return im_crop_padded, att_mask.astype(np.bool_), 1.0\n return im_crop_padded, 1.0, att_mask.astype(np.bool_), mask_crop_padded" }, { "identifier": "Preprocessor", "path": "lib/test/tracker/data_utils.py", "snippet": "class Preprocessor(object):\n def __init__(self):\n self.mean = torch.tensor([0.485, 0.456, 0.406]).view((1, 3, 1, 1)).cuda()\n self.std = torch.tensor([0.229, 0.224, 0.225]).view((1, 3, 1, 1)).cuda()\n\n def process(self, img_arr: np.ndarray, amask_arr: np.ndarray):\n # Deal with the image patch\n img_tensor = torch.tensor(img_arr).cuda().float().permute((2,0,1)).unsqueeze(dim=0)\n img_tensor_norm = ((img_tensor / 255.0) - self.mean) / self.std # (1,3,H,W)\n # Deal with the attention mask\n amask_tensor = torch.from_numpy(amask_arr).to(torch.bool).cuda().unsqueeze(dim=0) # (1,H,W)\n return NestedTensor(img_tensor_norm, amask_tensor)" }, { "identifier": "clip_box", "path": "lib/utils/box_ops.py", "snippet": "def clip_box(box: list, H, W, margin=0):\n x1, y1, w, h = box\n x2, y2 = x1 + w, y1 + h\n x1 = min(max(0, x1), W-margin)\n x2 = min(max(margin, x2), W)\n y1 = min(max(0, y1), H-margin)\n y2 = min(max(margin, y2), H)\n w = max(margin, x2-x1)\n h = max(margin, y2-y1)\n return [x1, y1, w, h]" }, { "identifier": "generate_mask_cond", "path": "lib/utils/ce_utils.py", "snippet": "def generate_mask_cond(cfg, bs, device, gt_bbox):\r\n template_size = cfg.DATA.TEMPLATE.SIZE\r\n stride = cfg.MODEL.BACKBONE.STRIDE\r\n template_feat_size = template_size // stride\r\n\r\n if cfg.MODEL.BACKBONE.CE_TEMPLATE_RANGE == 'ALL':\r\n box_mask_z = None\r\n elif cfg.MODEL.BACKBONE.CE_TEMPLATE_RANGE == 
'CTR_POINT':\r\n if template_feat_size == 8:\r\n index = slice(3, 4)\r\n elif template_feat_size == 12:\r\n index = slice(5, 6)\r\n elif template_feat_size == 16:\r\n index = slice(7, 8)\r\n elif template_feat_size == 24:\r\n index = slice(11, 12)\r\n elif template_feat_size == 7:\r\n index = slice(3, 4)\r\n elif template_feat_size == 14:\r\n index = slice(6, 7)\r\n else:\r\n raise NotImplementedError\r\n box_mask_z = torch.zeros([bs, template_feat_size, template_feat_size], device=device)\r\n box_mask_z[:, index, index] = 1\r\n box_mask_z = box_mask_z.flatten(1).to(torch.bool)\r\n elif cfg.MODEL.BACKBONE.CE_TEMPLATE_RANGE == 'CTR_REC':\r\n # use fixed 4x4 region, 3:5 for 8x8\r\n # use fixed 4x4 region 5:6 for 12x12\r\n if template_feat_size == 8:\r\n index = slice(3, 5)\r\n elif template_feat_size == 12:\r\n index = slice(5, 7)\r\n elif template_feat_size == 7:\r\n index = slice(3, 4)\r\n else:\r\n raise NotImplementedError\r\n box_mask_z = torch.zeros([bs, template_feat_size, template_feat_size], device=device)\r\n box_mask_z[:, index, index] = 1\r\n box_mask_z = box_mask_z.flatten(1).to(torch.bool)\r\n\r\n elif cfg.MODEL.BACKBONE.CE_TEMPLATE_RANGE == 'GT_BOX':\r\n box_mask_z = torch.zeros([bs, template_size, template_size], device=device)\r\n # box_mask_z_ori = data['template_seg'][0].view(-1, 1, *data['template_seg'].shape[2:]) # (batch, 1, 128, 128)\r\n box_mask_z = generate_bbox_mask(box_mask_z, gt_bbox * template_size).unsqueeze(1).to(\r\n torch.float) # (batch, 1, 128, 128)\r\n # box_mask_z_vis = box_mask_z.cpu().numpy()\r\n box_mask_z = F.interpolate(box_mask_z, scale_factor=1. / cfg.MODEL.BACKBONE.STRIDE, mode='bilinear',\r\n align_corners=False)\r\n box_mask_z = box_mask_z.flatten(1).to(torch.bool)\r\n # box_mask_z_vis = box_mask_z[:, 0, ...].cpu().numpy()\r\n # gaussian_maps_vis = generate_heatmap(data['template_anno'], self.cfg.DATA.TEMPLATE.SIZE, self.cfg.MODEL.STRIDE)[0].cpu().numpy()\r\n else:\r\n raise NotImplementedError\r\n\r\n return box_mask_z\r" } ]
import math import numpy as np import torch import cv2 import os from lib.models.odtrack import build_odtrack from lib.test.tracker.basetracker import BaseTracker from lib.test.tracker.vis_utils import gen_visualization from lib.test.utils.hann import hann2d from lib.train.data.processing_utils import sample_target from lib.test.tracker.data_utils import Preprocessor from lib.utils.box_ops import clip_box from lib.utils.ce_utils import generate_mask_cond
4,163
# for debug class ODTrack(BaseTracker): def __init__(self, params): super(ODTrack, self).__init__(params) network = build_odtrack(params.cfg, training=False) network.load_state_dict(torch.load(self.params.checkpoint, map_location='cpu')['net'], strict=True) self.cfg = params.cfg self.network = network.cuda() self.network.eval() self.preprocessor = Preprocessor() self.state = None self.feat_sz = self.cfg.TEST.SEARCH_SIZE // self.cfg.MODEL.BACKBONE.STRIDE # motion constraint
# for debug class ODTrack(BaseTracker): def __init__(self, params): super(ODTrack, self).__init__(params) network = build_odtrack(params.cfg, training=False) network.load_state_dict(torch.load(self.params.checkpoint, map_location='cpu')['net'], strict=True) self.cfg = params.cfg self.network = network.cuda() self.network.eval() self.preprocessor = Preprocessor() self.state = None self.feat_sz = self.cfg.TEST.SEARCH_SIZE // self.cfg.MODEL.BACKBONE.STRIDE # motion constraint
self.output_window = hann2d(torch.tensor([self.feat_sz, self.feat_sz]).long(), centered=True).cuda()
3
2023-12-10 03:57:19+00:00
8k
lumina-test/lumina
lumina/orchestrator/main.py
[ { "identifier": "config_stream_handler", "path": "lumina/utils/config_loggers.py", "snippet": "def config_stream_handler(logger):\n \"\"\" Configure stream handler\n\n Args:\n logger (logging.Logger): Logger object\n\n Returns:\n N/A\n \"\"\"\n logger.setLevel(logging.INFO)\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n console.setFormatter(logging.Formatter('%(name)-18s: %(levelname)-8s %(message)s'))\n logger.addHandler(console)" }, { "identifier": "config_file_handler", "path": "lumina/utils/config_loggers.py", "snippet": "def config_file_handler(logger, log_file, no_format=False):\n \"\"\" Configure file handler\n\n Args:\n logger (logging.Logger): Logger object\n log_file (str): Log file path\n no_format (bool): If True, do not format log messages (default: False)\n\n Returns:\n N/A\n \"\"\"\n logger.setLevel(logging.INFO)\n file_handler = logging.FileHandler(log_file, mode=\"w\")\n if no_format == False:\n file_handler.setFormatter(logging.Formatter('%(name)-18s: %(levelname)-8s %(message)s'))\n file_handler.setLevel(logging.INFO)\n logger.addHandler(file_handler)" }, { "identifier": "get_packet_list", "path": "lumina/analyzer/pcap_processor/pcap_process.py", "snippet": "def get_packet_list(pcap_file):\n \"\"\" Read a pcap file and return a list of packets\n\n Args:\n pcap_file (str): The pcap file to read\n\n Returns:\n list: The list of packets if successful, empty list otherwise\n\n Raises:\n IOError: If the pcap file cannot be opened for reading\n Exception: If the pcap file cannot be read\n \"\"\"\n packet_list = []\n try:\n with open(pcap_file, 'rb') as file_read:\n pcap = dpkt.pcap.Reader(file_read)\n for packet in pcap:\n packet_list.append(roce_packet.RRoCEPacket(packet))\n except IOError:\n logging.error(\"Unable to open pcap file %s. 
Please check your filename.\" % pcap_file)\n raise IOError\n\n except:\n logging.error(\"Failed to read pcap file %s.\" % pcap_file)\n raise Exception\n\n logging.info(\"Successfully read %d packets from %s.\" % (len(packet_list), pcap_file))\n return packet_list" }, { "identifier": "SwitchCounter", "path": "lumina/analyzer/counter/switch_counter.py", "snippet": "class SwitchCounter:\n \"\"\" Class to parse switch counter files\n\n Attributes:\n _counter (dict of dict): the switch counters with the following format:\n {'requester': {'ingress': counter_value, 'egress': counter_value},\n 'responder': {'ingress': counter_value, 'egress': counter_value},\n 'requester-mirror': {'ingress': counter_value, 'egress': counter_value},\n 'responder-mirror': {'ingress': counter_value, 'egress': counter_value}}\n \"\"\"\n def __init__(self, snapshot_filename, port_map):\n \"\"\" Constructor\n\n Args:\n snapshot_filename (str): the file where switch dumps its counters\n port_map (dict): the mapping between port name and port number\n\n Returns:\n N/A\n \"\"\"\n with open(snapshot_filename, \"r\") as stream:\n conf = yaml.safe_load(stream)\n try:\n ingress_counters = conf['counter']['ingress']\n egress_counters = conf['counter']['egress']\n except:\n print(\"Bad yaml format in %s\" % snapshot_filename)\n sys.exit(-1)\n\n requester_port = port_map['requester']\n responder_port = port_map['responder']\n requester_mirror_port = port_map['requester-mirror']\n responder_mirror_port = port_map['responder-mirror']\n\n self._counter = {'requester' : {'ingress':0, 'egress': 0},\n 'responder' : {'ingress':0, 'egress': 0},\n 'requester-mirror' : {'ingress':0, 'egress': 0},\n 'responder-mirror' : {'ingress':0, 'egress': 0}}\n try:\n self._counter['requester']['ingress'] = ingress_counters[requester_port]\n self._counter['responder']['ingress'] = ingress_counters[responder_port]\n self._counter['requester-mirror']['ingress'] = ingress_counters[requester_mirror_port]\n self._counter['responder-mirror']['ingress'] = ingress_counters[responder_mirror_port]\n\n self._counter['requester']['egress'] = egress_counters[requester_port]\n self._counter['responder']['egress'] = egress_counters[responder_port]\n self._counter['requester-mirror']['egress'] = egress_counters[requester_mirror_port]\n self._counter['responder-mirror']['egress'] = egress_counters[responder_mirror_port]\n\n except:\n print(\"Port number not exist in the switch snapshot\")\n sys.exit(-1)\n\n def get_counter(self):\n \"\"\" Return the switch counters (dict of dict) \"\"\"\n return self._counter" }, { "identifier": "IntegrityCheck", "path": "lumina/analyzer/checker/integrity_check.py", "snippet": "class IntegrityCheck:\n \"\"\" Class to check the integrity of the trace according to pcap files, and timestamps\n\n Attributes:\n packet_list (list of RRoCEPacket objects): list of packets\n switch_counter (SwitchCounter object): switch counter\n requester_ip_list (list of str): IP addresses of the requester\n responder_ip_list (list of str): IP addresses of the responder\n \"\"\"\n def __init__(self, packet_list, switch_counter, requester_ip_list, responder_ip_list):\n \"\"\" Constructor\n\n Args:\n packet_list (list of RRoCEPacket objects): list of packets\n switch_counter (SwitchCounter object): switch counter\n requester_ip_list (list of str): IP addresses of the requester\n responder_ip_list (list of str): IP addresses of the responder\n\n Returns:\n N/A\n \"\"\"\n self.packet_list = packet_list\n self.switch_counter = switch_counter\n 
self.requester_ip_list = requester_ip_list\n self.responder_ip_list = responder_ip_list\n\n def check_no_packet_loss(self):\n \"\"\" Check if there is any packet loss\n\n Returns:\n bool: True if there is no packet loss\n \"\"\"\n result = True\n switch_port_counter = self.switch_counter.get_counter()\n\n num_requester_ingress_pkts = switch_port_counter['requester']['ingress']\n num_responder_ingress_pkts = switch_port_counter['responder']['ingress']\n num_total_ingress_pkts = num_requester_ingress_pkts + num_responder_ingress_pkts\n\n num_mirror_requester_pkts = switch_port_counter['requester-mirror']['egress']\n num_mirror_responder_pkts = switch_port_counter['responder-mirror']['egress']\n num_mirror_pkts = num_mirror_requester_pkts + num_mirror_responder_pkts\n\n if num_total_ingress_pkts != num_mirror_pkts:\n # Check if the switch has mirrored all the ingress packets\n logging.error(\"The total number of ingress packets is %d, \"\\\n \"while we only mirror %d packets\"\\\n % (num_total_ingress_pkts, num_mirror_pkts))\n result = False\n\n if num_mirror_pkts != len(self.packet_list):\n # Check if we have captured all the mirrored packets\n logging.error(\"The total number of mirrored packets is %d, \"\\\n \"while we only capture %d packets\"\\\n % (num_mirror_pkts, len(self.packet_list)))\n result = False\n return result\n\n def check_seqnum_consecutive(self):\n \"\"\" Check if sequence numbers of packets are consecutive\n\n Returns:\n bool: True if sequence numbers of packets are consecutive\n \"\"\"\n packet_list = self.packet_list\n num_pkts = len(packet_list)\n expect_switch_seqnum = packet_list[0].get_switch_seqnum()\n\n for i in range(num_pkts):\n current_switch_seqnum = packet_list[i].get_switch_seqnum()\n if current_switch_seqnum != expect_switch_seqnum:\n logging.error(\"For packet %d: expected sequence number %d,\"\\\n \"but get sequence number %d.\"\\\n % (i, expect_switch_seqnum, current_switch_seqnum))\n return False\n expect_switch_seqnum += 1\n\n return True\n\n def __check_tstamp_increasing(self, pkt_list, max_deviation_sec):\n \"\"\" Check if timestamps of packets keep increasing\n\n Args:\n pkt_list (list of RRoCEPacket objets): list of packets\n max_deviation_sec (float): maximum deviation in second\n\n Returns:\n bool: True if timestamps of packets keep increasing\n \"\"\"\n last_pkt_tstamp = 0\n\n for i in range(len(pkt_list)):\n current_pkt_tstamp = pkt_list[i].get_switch_timestamp()\n # In theory, the condition should be \"current_ts >= last_ts\".\n # However, we do see hardware has nanosecond-level deviations somtimes.\n if current_pkt_tstamp >= last_pkt_tstamp:\n last_pkt_tstamp = current_pkt_tstamp\n continue\n\n tstamp_delta = last_pkt_tstamp - current_pkt_tstamp\n # Tolerate some hardware deviations or timestamp wraparound\n if tstamp_delta <= max_deviation_sec or tstamp_delta >= (1<<47):\n last_pkt_tstamp = current_pkt_tstamp\n continue\n\n logging.error(\"Packet %d's timestamp (%.9f) < last one's timestamp (%.9f)\" %\\\n (i, current_pkt_tstamp, last_pkt_tstamp))\n return False\n\n return True\n\n def check_tstamp(self):\n \"\"\" Check if timestamps of packets satisfy the following requirements:\n\n 1. Timestamps of all the packets should keep increasing within a deviation\n 2. 
Timestamps of packets of a single direction should keep *strictly* increasing.\n\n Returns:\n bool: True if timestamps of packets satisfy the requirements\n \"\"\"\n # Up to 50ns deviation\n max_deviation_sec = 50e-9\n if self.__check_tstamp_increasing(self.packet_list, max_deviation_sec) == False:\n logging.error(\"Timestamps of all the packets do not keep increasing within %.9f sec\" %\\\n (max_deviation_sec))\n return False\n\n # Packets sent by the requester and responder, respectively\n requester_pkt_list = []\n responder_pkt_list = []\n\n for pkt in self.packet_list:\n src_ip = pkt.get_src_ip()\n if src_ip in self.requester_ip_list:\n requester_pkt_list.append(pkt)\n elif src_ip in self.responder_ip_list:\n responder_pkt_list.append(pkt)\n\n #logging.info(\"%d requester packets, %d responder packets, %d packets in total\" %\\\n # (len(requester_pkt_list), len(responder_pkt_list), len(self.packet_list)))\n\n if self.__check_tstamp_increasing(requester_pkt_list, 0) == False:\n logging.error(\"Timestamps of packets sent by the requester do not keep strictly increasing\")\n return False\n\n if self.__check_tstamp_increasing(responder_pkt_list, 0) == False:\n logging.error(\"Timestamps of packets sent by the responder do not keep strictly increasing\")\n return False\n\n return True\n\n def check(self):\n \"\"\" Check the integrity of the trace according to pcap files, and timestamps\n\n Returns:\n bool: True if the trace is valid\n \"\"\"\n return self.check_no_packet_loss() and self.check_seqnum_consecutive() and self.check_tstamp()" } ]
import argparse, sys, os, yaml, time, subprocess, logging, math, copy import lumina.orchestrator.host as host import lumina.orchestrator.switch as switch import lumina.analyzer.pcap_processor.pcap_process as pcap_process from lumina.utils.config_loggers import config_stream_handler, config_file_handler from lumina.analyzer.pcap_processor.pcap_process import get_packet_list from lumina.analyzer.counter.switch_counter import SwitchCounter from lumina.analyzer.checker.integrity_check import IntegrityCheck
5,301
return False logging.info("Experiment completed successfully") return True def clean_up(self): """ Clean up the environment after the experiment Returns: bool: True if the clean up is completed successfully, False otherwise """ logging.info("Start cleaning up the environment") if self.switch.clean_up() == False: logging.error("Failed to clean up switch") return False if self.requester.clean_up() == False: logging.error("Failed to clean up requester") return False if self.responder.clean_up() == False: logging.error("Failed to clean up responder") return False if self.requester_mirror.clean_up() == False: logging.error("Failed to clean up requester mirror") return False if self.responder_mirror.clean_up() == False: logging.error("Failed to clean up responder mirror") return False return True def fetch_results(self, iter_id=0): """ Fetch the results of iteration 'iter_id', including: 1. Switch table entries and counters 2. Packet trace (pcap file) 3. Configs and end-to-end results from RDMA hosts Args: iter_id (int, optional): iteration ID, defaults to 0 Returns: bool: True if the result collection is completed successfully, False otherwise """ ## Make the results dir if it does not exist iter_result_path = os.path.join(self.result_path, str(iter_id)) cmd = "mkdir -p %s" % iter_result_path try: subprocess.call(cmd, shell=True) except: logging.error("Failed to create result directory %s" % iter_result_path) return False if self.switch.fetch_results(iter_result_path) == False: logging.error("Failed to fetch results from switch") return False if self.requester_mirror.fetch_results(iter_result_path) == False: logging.error("Failed to fetch results from requester mirror") return False if self.responder_mirror.fetch_results(iter_result_path) == False: logging.error("Failed to fetch results from responder mirror") return False if self.requester.fetch_results(iter_result_path) == False: logging.error("Failed to fetch results from requester") return False if self.responder.fetch_results(iter_result_path) == False: logging.error("Failed to fetch results from responder") return False logging.info("Finished fetching results for iteration %d" % iter_id) return True def merge_traces(self, iter_id=0): iter_pcap_dir_path = os.path.join(self.result_path, str(iter_id), host.PCAP_RESULT_DIR) src_pcap_file_list = [os.path.join(iter_pcap_dir_path, self.requester_mirror.conf['pkt-dump-conf']['dump-filename']), os.path.join(iter_pcap_dir_path, self.responder_mirror.conf['pkt-dump-conf']['dump-filename'])] target_pcap_path = os.path.join(self.result_path, str(iter_id), host.PCAP_RESULT_DIR, self.aggregate_pcap_filename) packet_list = pcap_process.merge_pcaps(src_pcap_file_list) if packet_list is None: logging.error("Failed to merge pcap files for iteration %d" % iter_id) return False if pcap_process.dump_pkts_to_pcap(target_pcap_path, packet_list) == False: logging.error("Failed to dump packets to pcap file %s" % target_pcap_path) return False logging.info("Successfully merged pcap files for iteration %d" % iter_id) def check_integrity(self, iter_id=0): ## Check if the collected packet trace passes integrity check pcap_path = os.path.join(self.result_path, str(iter_id), host.PCAP_RESULT_DIR, self.aggregate_pcap_filename) packet_list = get_packet_list(pcap_path) packet_list.sort(key=lambda x:x.get_switch_seqnum()) logging.info("Packet trace sorted by switch sequence number.") switch_state_snapshot = os.path.join(self.result_path, str(iter_id), switch.SWITCH_RESULT_DIR, switch.SWITCH_STATE_SNAPSHOT) port_map = 
{'requester': self.requester.conf['nic']['switch-port'], 'responder': self.responder.conf['nic']['switch-port'], 'requester-mirror': self.requester_mirror.conf['nic']['switch-port'], 'responder-mirror': self.responder_mirror.conf['nic']['switch-port']} switch_counter = SwitchCounter(switch_state_snapshot, port_map)
## Logs will be logged into file LOG_FILENAME LOG_FILENAME = "run.log" ## Max # of experiment retries MAX_NB_EXP_RETRIES = 3 class Orchestrator: """ Class to manage the experiment """ def __init__(self, config_file): """ Constructor for Orchestrator class Args: config_file (str): path to the yaml (config) file. The file contains configs for switch, requester, responder, traffic, etc. Returns: N/A """ with open(config_file, "r") as stream: conf = yaml.safe_load(stream) try: local_workspace = conf['local-workspace'] result_path = conf['result-path'] switch_conf = conf['switch'] requester_conf = conf['requester'] responder_conf = conf['responder'] requester_mirror_conf = conf['requester-mirror'] responder_mirror_conf = conf['responder-mirror'] traffic_conf = conf['traffic'] rewrite_udp_dst_port = conf['rewrite-udp-dst-port'] num_repeats = conf['num-repeats'] agg_pcap_filename = conf['aggregate-pcap-filename'] except KeyError as e: print("Config file %s has a bad yaml format (key error: %s)" % (config_file, e)) sys.exit(-1) switch_conf['rewrite-udp-dst-port'] = rewrite_udp_dst_port requester_mirror_conf['pkt-dump-conf']['rewrite-udp-dst-port'] = rewrite_udp_dst_port responder_mirror_conf['pkt-dump-conf']['rewrite-udp-dst-port'] = rewrite_udp_dst_port self.local_workspace = local_workspace self.result_path = result_path self.traffic_conf = traffic_conf self.num_repeats = num_repeats self.switch = switch.Switch(switch_conf) self.requester = host.RDMAHost(requester_conf) self.responder = host.RDMAHost(responder_conf) self.requester_mirror = host.MirrorHost(requester_mirror_conf) self.responder_mirror = host.MirrorHost(responder_mirror_conf) self.aggregate_pcap_filename = agg_pcap_filename cmd = "mkdir -p %s" % self.result_path subprocess.call(cmd, shell = True) def rm_old_files(self): """ Remove result files left by previous experiments """ old_iter_id = 0 old_iter_result_path = os.path.join(self.result_path, str(old_iter_id)) while os.path.exists(old_iter_result_path) and not os.path.isfile(old_iter_result_path): cmd = "rm -rf %s" % (old_iter_result_path) subprocess.call(cmd, shell=True) old_iter_id += 1 old_iter_result_path = os.path.join(self.result_path, str(old_iter_id)) def get_requester_ip_list(self): """ Return the list of requester IP addresses (without prefix length info) """ return [x.split('/')[0] for x in self.requester.conf['nic']['ip-list']] def get_responder_ip_list(self): """ Return the list of responder IP addresses (without prefix length info) """ return [x.split('/')[0] for x in self.responder.conf['nic']['ip-list']] def get_num_repeats(self): """ Return the number of experiment repeats """ return self.num_repeats def sync_and_compile(self): """ Syncronize and compile the code on all the hosts Returns: bool: True if the code is synced and compiled successfully, False otherwise """ logging.info("Sync and compile the code") ## Sync and compile the switch code ret = self.switch.sync_and_compile(self.local_workspace, switch.SWITCH_PROG_DIR_NAME, switch.SWITCH_PROG_FILE_NAME) if ret == False: logging.error("Failed to sync and compile the switch code") return False ## Sync and compile the traffic generator code rdma_verb = self.traffic_conf['rdma-verb'].strip().lower() if rdma_verb not in host.VALID_IB_VERB_LIST_LOWER: logging.error("Invalid RDMA verb: %s" % rdma_verb) return False ret = self.requester.sync_and_compile(local_workspace=self.local_workspace, prog_dir_name=self.requester.traffic_gen_dir_name(), prog_file_name=self.requester.traffic_gen_client_name(rdma_verb)) if ret == 
False: logging.error("Failed to sync and compile the traffic generator code on requester") return False ret = self.responder.sync_and_compile(local_workspace=self.local_workspace, prog_dir_name=self.requester.traffic_gen_dir_name(), prog_file_name=self.requester.traffic_gen_server_name(rdma_verb)) if ret == False: logging.error("Failed to sync and compile the traffic generator code on responder") return False ret = self.requester.sync(local_workspace=self.local_workspace, prog_dir_name=host.DUMP_COUNTER_DIR_NAME) if ret == False: logging.error("Failed to sync the dump counter code on requester") return False ret = self.responder.sync(local_workspace=self.local_workspace, prog_dir_name=host.DUMP_COUNTER_DIR_NAME) if ret == False: logging.error("Failed to sync the dump counter code on responder") return False ## Sync and compile the packet capture code ret = self.requester_mirror.sync_and_compile(local_workspace=self.local_workspace, prog_dir_name=host.PKT_CAPTURE_DIR_NAME, prog_file_name=host.PKT_CAPTURE_FILE_NAME) if ret == False: logging.error("Failed to sync and compile the packet capture code on requester_mirror") return False ret = self.responder_mirror.sync_and_compile(local_workspace=self.local_workspace, prog_dir_name=host.PKT_CAPTURE_DIR_NAME, prog_file_name=host.PKT_CAPTURE_FILE_NAME) if ret == False: logging.error("Failed to sync and compile the packet capture code on responder_mirror") return False return True def generate_switch_table_config(self): """ Generate the switch configuration, including: 1. Forward table 2. Mirror table 3. ARP table 4. Traffic table, including the events to inject Returns: bool: True if the switch configuration is generated successfully, False otherwise """ requester_nic_conf = self.requester.conf['nic'] responder_nic_conf = self.responder.conf['nic'] requester_mirror_nic_conf = self.requester_mirror.conf['nic'] responder_mirror_nic_conf = self.responder_mirror.conf['nic'] ## Set up forward table entries self.switch.conf['forward-table'] = [] try: for nic_conf, host_type in zip([requester_nic_conf, responder_nic_conf, \ requester_mirror_nic_conf, responder_mirror_nic_conf], ['requester', 'responder', 'requester_mirror', 'responder_mirror']): forward_table_entry = {'dst-mac': nic_conf['mac'], 'eg-port': nic_conf['switch-port'], 'host': host_type} self.switch.conf['forward-table'].append(forward_table_entry) except: logging.error("Failed to set forward table") return False ## Set up mirror table entries, use ingress_to_egress try: requester_mirror_entry = {'direction': 'ingress_to_egress', 'src-port': requester_nic_conf['switch-port'], 'dst-port': requester_mirror_nic_conf['switch-port']} responder_mirror_entry = {'direction': 'ingress_to_egress', 'src-port': responder_nic_conf['switch-port'], 'dst-port': responder_mirror_nic_conf['switch-port']} self.switch.conf['mirror-table'] = [requester_mirror_entry, responder_mirror_entry] except: logging.error("Failed to set mirror table") return False requester_mac = requester_nic_conf['mac'] responder_mac = responder_nic_conf['mac'] requester_ip_list = requester_nic_conf['ip-list'] responder_ip_list = responder_nic_conf['ip-list'] ## Set up arp table entries arp_entries = [] try: for dst_ip_list, dst_mac in zip([requester_ip_list, responder_ip_list], [requester_mac, responder_mac]): for dst_ip_subnet in dst_ip_list: dst_ip = dst_ip_subnet.split('/')[0] arp_entries.append({'dst-ip': dst_ip, 'dst-mac': dst_mac}) self.switch.conf['arp-table'] = arp_entries except: logging.error("Failed to set ARP table") return 
False ## Generate the events of each iteration for switch config per_iter_event_list = self.traffic_conf['data-pkt-events'] msg_size = self.traffic_conf['message-size'] mtu = self.traffic_conf['mtu'] num_msgs_per_qp = self.traffic_conf['num-msgs-per-qp'] num_pkts_per_msg = int(math.ceil(msg_size / mtu)) self.switch.conf['traffic'] = {} self.switch.conf['traffic']['num-msgs-per-qp'] = num_msgs_per_qp self.switch.conf['traffic']['num-pkts-per-msg'] = num_pkts_per_msg self.switch.conf['traffic']['data-pkt-events'] = [] if per_iter_event_list is None or len(per_iter_event_list) == 0: ## No events at all return True for i in range(num_msgs_per_qp): for per_iter_event in per_iter_event_list: global_event = copy.deepcopy(per_iter_event) ## This event is applied to all the packets of the message. We need to expand it! if str(global_event['psn']).lower() == 'all': for psn in range(num_pkts_per_msg): global_event['psn'] = psn + i * num_pkts_per_msg self.switch.conf['traffic']['data-pkt-events'].append(copy.deepcopy(global_event)) else: global_event['psn'] += i * num_pkts_per_msg self.switch.conf['traffic']['data-pkt-events'].append(copy.deepcopy(global_event)) return True def ping_mesh(self): """ Ping all the IP addresses between requester and responder to check the connectivity Returns: bool: True if all the IP addresses can be pinged successfully, False otherwise """ for requester_ip_subnet in self.requester.conf['nic']['ip-list']: requester_ip = requester_ip_subnet.split('/')[0] command = "ping " + requester_ip + " -c 5 -i 0.2" ret_val, err_info, exit_status = self.responder.execute_command(command) if exit_status != 0: logging.error("Failed to ping ip " + requester_ip) logging.error("[Command return info]: %s %s" % (', '.join(ret_val), ', '.join(err_info))) return False for responder_ip_subnet in self.responder.conf['nic']['ip-list']: responder_ip = responder_ip_subnet.split('/')[0] command = "ping " + responder_ip + " -c 5 -i 0.2" ret_val, err_info, exit_status = self.requester.execute_command(command) if exit_status != 0: logging.error("Failed to ping ip " + responder_ip) logging.error("[Command return info]: %s %s" % (ret_val, err_info)) return False logging.info("Successfully pinged all the IP addresses between requester and responder") return True def generate_switch_config_file(self): """ Generate the switch configuration file and copy it to the switch Returns: bool: True if the switch configuration file is generated and copied successfully, False otherwise """ ## Get the mac address for all the hosts self.requester.get_mac_address() self.responder.get_mac_address() self.requester_mirror.get_mac_address() self.responder_mirror.get_mac_address() ## Generate config for Match-Action table in switch if self.generate_switch_table_config() == False: logging.error("Failed to generate switch table configuration") return False ## Dump the switch configuration into a file, and copy it to the switch if self.switch.dump_controller_config(self.local_workspace) == False: logging.error("Failed to dump switch config") return False return True def __is_valid_traffc(self): """ Check if the traffic configuration is valid, including: 1. The tx-depth should be 1 or > 1 2. 
If tx-depth > 1, then we can only inject ECN marking events Returns: bool: True if the traffic configuration is valid, False otherwise """ try: data_pkt_events = self.traffic_conf['data-pkt-events'] tx_depth = self.traffic_conf['tx-depth'] if tx_depth == 1: return True elif tx_depth <= 0: return False for event in data_pkt_events: if event['type'] != 'ecn': logging.error("Cannot inject %s event when tx depth = %d" % (event['type'], tx_depth)) return False except: logging.error("Failed to parse traffic configuration") return False return True def run_experiment(self): """ Run the experiment Returns: bool: True if the experiment is completed successfully, False otherwise """ ## Check if traffic configuration is valid if self.__is_valid_traffc() == False: logging.error("Invalid traffic configuration") return False ## Run switch program if self.switch.run_switch() == False: logging.error("Failed to run switch") return False ## Sleep for 1 second to make sure control plane is listenning (for client message) time.sleep(1) ## Configure the servers if self.requester.config_traffic_gen() == False: logging.error("Failed to config RDMA requester") return False if self.responder.config_traffic_gen() == False: logging.error("Failed to config RDMA responder") return False if self.requester_mirror.config_packet_capture() == False: logging.error("Failed to config packet capture on requester mirror") return False if self.responder_mirror.config_packet_capture() == False: logging.error("Failed to config packet capture on responder mirror") return False ## Check the connectivity through pingmesh (try 5 rounds) num_tries = 0 pingmesh_ret = False while num_tries < 5: pingmesh_ret = self.ping_mesh() if pingmesh_ret == True: break num_tries += 1 time.sleep(1) if pingmesh_ret == False: logging.error("Failed to ping all the IP addresses between requester and responder") return False ## Launch packet capture for both side ## Prerequisite: config hugepage and igb_uio if needed if self.requester_mirror.run_packet_capture() == False: logging.error("Failed to run packet capture on requester mirror") return False if self.responder_mirror.run_packet_capture() == False: logging.error("Failed to run packet capture on responder mirror") return False time.sleep(3) ## Dump the counters before running if self.requester.dump_counters(host.REQ_START_COUNTER_FILE_NAME) == False: logging.error("Failed to dump counters on requester before running") return False if self.responder.dump_counters(host.RSP_START_COUNTER_FILE_NAME) == False: logging.error("Failed to dump counters on responder before running") return False ## Launch RDMA server first run_server_ret = self.responder.run_traffic_gen_server(self.traffic_conf) if run_server_ret == False: logging.error("Failed to run RDMA server") return False time.sleep(2) ## Launch RDMA client try: destination_ip_subnet = self.responder.conf['nic']['ip-list'][0] destination_ip = destination_ip_subnet.split('/')[0] except: logging.error("Failed to get destination IP") return False run_client_ret = self.requester.run_traffic_gen_client(traffic_conf=self.traffic_conf, destination_ip=destination_ip, controller_ip=self.switch.conf['control-ip'], controller_listen_port=self.switch.conf['listen-port']) if run_client_ret == False: logging.error("Failed to run RDMA client") return False if self.switch.dump_results() == False: logging.error("Failed to dump results from switch") return False if self.requester.dump_counters(host.REQ_FINISH_COUNTER_FILE_NAME) == False: logging.error("Failed to dump 
counters on requester after running") return False if self.responder.dump_counters(host.RSP_FINISH_COUNTER_FILE_NAME) == False: logging.error("Failed to dump counters on responder after running") return False logging.info("Experiment completed successfully") return True def clean_up(self): """ Clean up the environment after the experiment Returns: bool: True if the clean up is completed successfully, False otherwise """ logging.info("Start cleaning up the environment") if self.switch.clean_up() == False: logging.error("Failed to clean up switch") return False if self.requester.clean_up() == False: logging.error("Failed to clean up requester") return False if self.responder.clean_up() == False: logging.error("Failed to clean up responder") return False if self.requester_mirror.clean_up() == False: logging.error("Failed to clean up requester mirror") return False if self.responder_mirror.clean_up() == False: logging.error("Failed to clean up responder mirror") return False return True def fetch_results(self, iter_id=0): """ Fetch the results of iteration 'iter_id', including: 1. Switch table entries and counters 2. Packet trace (pcap file) 3. Configs and end-to-end results from RDMA hosts Args: iter_id (int, optional): iteration ID, defaults to 0 Returns: bool: True if the result collection is completed successfully, False otherwise """ ## Make the results dir if it does not exist iter_result_path = os.path.join(self.result_path, str(iter_id)) cmd = "mkdir -p %s" % iter_result_path try: subprocess.call(cmd, shell=True) except: logging.error("Failed to create result directory %s" % iter_result_path) return False if self.switch.fetch_results(iter_result_path) == False: logging.error("Failed to fetch results from switch") return False if self.requester_mirror.fetch_results(iter_result_path) == False: logging.error("Failed to fetch results from requester mirror") return False if self.responder_mirror.fetch_results(iter_result_path) == False: logging.error("Failed to fetch results from responder mirror") return False if self.requester.fetch_results(iter_result_path) == False: logging.error("Failed to fetch results from requester") return False if self.responder.fetch_results(iter_result_path) == False: logging.error("Failed to fetch results from responder") return False logging.info("Finished fetching results for iteration %d" % iter_id) return True def merge_traces(self, iter_id=0): iter_pcap_dir_path = os.path.join(self.result_path, str(iter_id), host.PCAP_RESULT_DIR) src_pcap_file_list = [os.path.join(iter_pcap_dir_path, self.requester_mirror.conf['pkt-dump-conf']['dump-filename']), os.path.join(iter_pcap_dir_path, self.responder_mirror.conf['pkt-dump-conf']['dump-filename'])] target_pcap_path = os.path.join(self.result_path, str(iter_id), host.PCAP_RESULT_DIR, self.aggregate_pcap_filename) packet_list = pcap_process.merge_pcaps(src_pcap_file_list) if packet_list is None: logging.error("Failed to merge pcap files for iteration %d" % iter_id) return False if pcap_process.dump_pkts_to_pcap(target_pcap_path, packet_list) == False: logging.error("Failed to dump packets to pcap file %s" % target_pcap_path) return False logging.info("Successfully merged pcap files for iteration %d" % iter_id) def check_integrity(self, iter_id=0): ## Check if the collected packet trace passes integrity check pcap_path = os.path.join(self.result_path, str(iter_id), host.PCAP_RESULT_DIR, self.aggregate_pcap_filename) packet_list = get_packet_list(pcap_path) packet_list.sort(key=lambda x:x.get_switch_seqnum()) 
logging.info("Packet trace sorted by switch sequence number.") switch_state_snapshot = os.path.join(self.result_path, str(iter_id), switch.SWITCH_RESULT_DIR, switch.SWITCH_STATE_SNAPSHOT) port_map = {'requester': self.requester.conf['nic']['switch-port'], 'responder': self.responder.conf['nic']['switch-port'], 'requester-mirror': self.requester_mirror.conf['nic']['switch-port'], 'responder-mirror': self.responder_mirror.conf['nic']['switch-port']} switch_counter = SwitchCounter(switch_state_snapshot, port_map)
integrity_checker = IntegrityCheck(packet_list=packet_list,
4
2023-12-09 08:21:14+00:00
8k
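The orchestrator record above merges per-host packet captures (`merge_traces`) and re-orders the aggregate trace before integrity checking, but the helpers it calls (`pcap_process.merge_pcaps`, `dump_pkts_to_pcap`, `get_packet_list`) are repo-specific and not shown here. As a rough, self-contained illustration only, the sketch below merges capture files and orders packets by capture timestamp using scapy; note the record's own `check_integrity` sorts by a switch-inserted sequence number instead, and the file names here are hypothetical.

```python
# Minimal sketch (assumes scapy is installed); not the repository's pcap_process module.
from scapy.utils import rdpcap, wrpcap

def merge_and_sort_pcaps(src_paths, target_path):
    """Merge several pcap files and sort the packets by capture timestamp."""
    packets = []
    for path in src_paths:
        packets.extend(rdpcap(path))          # read every packet from each capture
    packets.sort(key=lambda pkt: pkt.time)    # order by capture time (illustrative criterion)
    wrpcap(target_path, packets)              # write the aggregate trace
    return packets

# Hypothetical usage mirroring merge_traces():
# merge_and_sort_pcaps(["requester_mirror.pcap", "responder_mirror.pcap"], "aggregate.pcap")
```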
equilibration/equipy
equipy/graphs/_arrow_plot.py
[ { "identifier": "permutations_columns", "path": "equipy/utils/permutations/_compute_permutations.py", "snippet": "def permutations_columns(sensitive_features):\n \"\"\"\n Generate permutations of columns in the input array sensitive_features.\n\n Parameters\n ----------\n sensitive_features : array-like\n Input array where each column represents a different sensitive feature.\n\n Returns\n -------\n dict\n A dictionary where keys are tuples representing permutations of column indices,\n and values are corresponding permuted arrays of sensitive features.\n\n Example\n -------\n >>> sensitive_features = [[1, 2], [3, 4], [5, 6]]\n >>> generate_permutations_cols(sensitive_features)\n {(0, 1): [[1, 2], [3, 4], [5, 6]], (1, 0): [[3, 4], [1, 2], [5, 6]]}\n\n Note\n ----\n This function generates all possible permutations of columns and stores them in a dictionary.\n \"\"\"\n n = len(sensitive_features[0])\n ind_cols = list(range(n))\n permut_cols = list(itertools.permutations(ind_cols))\n sensitive_features_with_ind = np.vstack((ind_cols, sensitive_features))\n\n dict_all_combs = {}\n for permutation in permut_cols:\n permuted_sensitive_features = sensitive_features_with_ind[:, permutation]\n\n key = tuple(permuted_sensitive_features[0])\n\n values = permuted_sensitive_features[1:].tolist()\n dict_all_combs[key] = values\n\n return dict_all_combs" }, { "identifier": "calculate_perm_wasserstein", "path": "equipy/utils/permutations/_compute_permutations.py", "snippet": "def calculate_perm_wasserstein(y_calib, sensitive_features_calib, y_test, sensitive_features_test, epsilon=None):\n \"\"\"\n Calculate Wasserstein distance for different permutations of sensitive features between calibration and test sets.\n\n Parameters\n ----------\n y_calib : array-like\n Calibration set predictions.\n sensitive_features_calib : array-like\n Calibration set sensitive features.\n y_test : array-like\n Test set predictions.\n sensitive_features_test : array-like\n Test set sensitive features.\n epsilon : array-like or None, optional\n Fairness constraints. 
Defaults to None.\n\n Returns\n -------\n dict\n A dictionary where keys are tuples representing permutations of column indices,\n and values are corresponding sequential fairness values for each permutation.\n\n Example\n -------\n >>> y_calib = [1, 2, 3]\n >>> sensitive_features_calib = [[1, 2], [3, 4], [5, 6]]\n >>> y_test = [4, 5, 6]\n >>> sensitive_features_test = [[7, 8], [9, 10], [11, 12]]\n >>> calculate_perm_wst(y_calib, sensitive_features_calib, y_test, sensitive_features_test)\n {(0, 1): {'Base model': 0.5, 'sens_var_1': 0.2}, (1, 0): {'Base model': 0.3, 'sens_var_0': 0.6}}\n\n Note\n ----\n This function calculates Wasserstein distance for different permutations of sensitive features\n between calibration and test sets and stores the sequential fairness values in a dictionary.\n \"\"\"\n all_perm_calib = permutations_columns(sensitive_features_calib)\n all_perm_test = permutations_columns(sensitive_features_test)\n if epsilon is not None:\n all_perm_epsilon = permutations_columns(np.array([np.array(epsilon).T]))\n for key in all_perm_epsilon.keys():\n all_perm_epsilon[key] = all_perm_epsilon[key][0]\n\n store_dict = {}\n for key in all_perm_calib:\n wst = MultiWasserstein()\n wst.fit(y_calib, np.array(all_perm_calib[key]))\n if epsilon is None:\n wst.transform(y_test, np.array(\n all_perm_test[key]))\n else:\n wst.transform(y_test, np.array(\n all_perm_test[key]), all_perm_epsilon[key])\n store_dict[key] = wst.y_fair\n old_keys = list(store_dict[key].keys())\n new_keys = ['Base model'] + [f'sens_var_{k}' for k in key]\n key_mapping = dict(zip(old_keys, new_keys))\n store_dict[key] = {key_mapping[old_key]\n : value for old_key, value in store_dict[key].items()}\n return store_dict" }, { "identifier": "unfairness_permutations", "path": "equipy/utils/permutations/metrics/_fairness_permutations.py", "snippet": "def unfairness_permutations(permut_y_fair_dict, all_combs_sensitive_features):\n \"\"\"\n Compute unfairness values for multiple fair output datasets and multiple sensitive attribute datasets.\n\n Parameters\n ----------\n permut_y_fair_dict : dict\n A dictionary containing permutations of fair output datasets.\n all_combs_sensitive_features : dict\n A dictionary containing combinations of columns permutations for sensitive attribute datasets.\n\n Returns\n -------\n list\n A list of dictionaries containing unfairness values for each permutation of fair output datasets.\n\n Example\n -------\n >>> permut_y_fair_dict = {(1,2): {'Base model':np.array([19,39,65]), 'sens_var_1':np.array([22,40,50]), 'sens_var_2':np.array([28,39,42])},\n ... (2,1): {'Base model':np.array([19,39,65]), 'sens_var_2':np.array([34,39,60]), 'sens_var_1':np.array([28,39,42])}}\n >>> all_combs_sensitive_features = {(1,2): np.array([['blue', 2], ['red', 9], ['green', 5]]),\n ... 
(2,1): np.array([[2, 'blue'], [9, 'red'], [5, 'green']])}\n >>> unfs_list = compute_unfairness_permutations(permut_y_fair_dict, all_combs_sensitive_features)\n >>> print(unfs_list)\n [{'sens_var_0': 46.0, 'sens_var_1': 28.0, 'sens_var_2': 14.0}, \n {'sens_var_0': 46.0, 'sens_var_1': 26.0, 'sens_var_2': 14.0}]\n \"\"\"\n unfs_list = []\n for key, value in permut_y_fair_dict.items():\n unfs_list.append(unfairness_dict(\n value, np.array(all_combs_sensitive_features[key])))\n return unfs_list" }, { "identifier": "performance_permutations", "path": "equipy/utils/permutations/metrics/_performance_permutations.py", "snippet": "def performance_permutations(y_true, permut_y_fair_dict, metric=mean_squared_error):\n \"\"\"\n Compute the performance values for multiple fair output datasets compared to the true labels, considering permutations.\n\n Parameters\n ----------\n y_true : array-like\n True labels or ground truth values.\n permut_y_fair_dict : dict\n A dictionary containing permutations of fair output datasets.\n metric : callable, optional\n The metric used to compute the performance, default=sklearn.metrics.mean_square_error.\n\n Returns\n -------\n list\n A list of dictionaries containing performance values for each permutation of fair output datasets.\n\n Example\n -------\n >>> y_true = np.array([15, 38, 68])\n >>> permut_y_fair_dict = {(1,2): {'Base model':np.array([19,39,65]), 'sens_var_1':np.array([22,40,50]), 'sens_var_2':np.array([28,39,42])},\n ... (2,1): {'Base model':np.array([19,39,65]), 'sens_var_2':np.array([34,39,60]), 'sens_var_1':np.array([28,39,42])}}\n >>> performance_values = compute_performance_permutations(y_true, permut_y_fair_dict)\n >>> print(performance_values)\n [{'Base model': 8.666666666666666, 'sens_var_1': 125.66666666666667, 'sens_var_2': 282.0}, \n {'Base model': 8.666666666666666, 'sens_var_2': 142.0, 'sens_var_1': 282.0}]\n \"\"\"\n performance_list = []\n for value in permut_y_fair_dict.values():\n performance_list.append(performance_dict(\n y_true, value, metric))\n return performance_list" } ]
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error
from ..utils.permutations._compute_permutations import permutations_columns, calculate_perm_wasserstein
from ..utils.permutations.metrics._fairness_permutations import unfairness_permutations
from ..utils.permutations.metrics._performance_permutations import performance_permutations
3,738
line.axes.annotate(label, xytext=( x[i]+np.min(x)/20, y[i]), xy=(x[i], y[i]), size=10) ax.scatter(x[i], y[i], label=label, marker="+", s=150) elif (i == len(x)-1) & (final_model): label = f"$A_{1}$" + r"$_:$" + f"$_{i}$-fair" line.axes.annotate(label, xytext=( x[i]+np.min(x)/20, y[i]), xy=(x[i], y[i]), size=10) ax.scatter(x[i], y[i], label=label, marker="*", s=150) elif (i == 2) & (i < len(x)-1): label = f"$A_{sens[1]}$" + r"$_,$" + f"$_{sens[i]}$-fair" line.axes.annotate(label, xytext=( x[i]+np.min(x)/20, y[i]), xy=(x[i], y[i]), size=10) ax.scatter(x[i], y[i], label=label, marker="+", s=150) else: ax.scatter(x[i], y[i], marker="+", s=150, color="grey", alpha=0.4) ax.set_xlabel("Unfairness") ax.set_ylabel("Performance") ax.set_xlim((np.min(x)-np.min(x)/10-np.max(x)/10, np.max(x)+np.min(x)/10+np.max(x)/10)) ax.set_ylim((np.min(y)-np.min(y)/10-np.max(y)/10, np.max(y)+np.min(y)/10+np.max(y)/10)) ax.set_title("Exact fairness") ax.legend(loc="best") def _fair_custimized_arrow_plot(unfs_list, performance_list): """ Plot arrows representing the fairness-performance ccombinations step by step (by sensitive attribute) to reach fairness for all permutations (order of sensitive variables for which fairness is calculated). Parameters ---------- unfs_list : list A list of dictionaries containing unfairness values for each permutation of fair output datasets. performance_list : list A list of dictionaries containing performance values for each permutation of fair output datasets. Returns ------- matplotlib.figure.Figure arrows representing the fairness-performance combinations step by step (by sensitive attribute) to reach fairness for each combination. Plotting Conventions -------------------- - Arrows represent different fairness-performance combinations for each scenario in the input lists. - Axes are labeled for unfairness (x-axis) and performance (y-axis). Example Usage ------------- >>> arrow_plot_permutations(unfs_list, performance_list) Note ---- This function uses a global variable `ax` for plotting, ensuring compatibility with external code. """ global ax fig, ax = plt.subplots() for i in range(len(unfs_list)): if i == 0: fair_arrow_plot(unfs_list[i], performance_list[i], permutations=True, final_model=False) elif i == len(unfs_list)-1: fair_arrow_plot(unfs_list[i], performance_list[i], permutations=True, base_model=False) else: fair_arrow_plot(unfs_list[i], performance_list[i], permutations=True, base_model=False, final_model=False) def fair_multiple_arrow_plot(sensitive_features_calib, sensitive_features_test, y_calib, y_test, y_true_test, epsilon=None, test_size=0.3, permutation=True, metric=mean_squared_error): """ Plot arrows representing the fairness-performance combinations step by step (by sensitive attribute) to reach fairness for different permutations. Parameters ---------- sensitive_features_calib : numpy.ndarray Sensitive features for calibration. sensitive_features_test : numpy.ndarray Sensitive features for testing. y_calib : numpy.ndarray Predictions for calibration. y_test : numpy.ndarray Predictions for testing. y_true_test : numpy.ndarray True labels for testing. epsilon : float, optional Epsilon value for calculating Wasserstein distance. Defaults to None. test_size : float, optional Size of the testing set. Defaults to 0.3. permutation : bool, optional If True, displays permutations of arrows based on input dictionaries. Defaults to True. metric : function, optional The metric used to evaluate performance. Defaults to mean_squared_error. 
Returns ------- matplotlib.axes.Axes Arrows representing the fairness-performance combinations step by step (by sensitive attribute) to reach fairness for different permutations. Plotting Conventions -------------------- - Arrows represent different fairness-performance combinations for each permutation. - Axes are labeled for unfairness (x-axis) and performance (y-axis). Example Usage ------------- >>> custom_fair_arrow_plot(sensitive_features_calib, sensitive_features_test, y_calib, y_test, y_true_test) Note ---- This function uses a global variable `ax` for plotting, ensuring compatibility with external code. """ permut_y_fair_dict = calculate_perm_wasserstein( y_calib, sensitive_features_calib, y_test, sensitive_features_test, epsilon=epsilon) all_combs_sensitive_features_test = permutations_columns( sensitive_features_test) unfs_list = unfairness_permutations( permut_y_fair_dict, all_combs_sensitive_features_test)
def fair_arrow_plot(unfs_dict, performance_dict, permutations=False, base_model=True, final_model=True): """ Generates an arrow plot representing the fairness-performance combinations step by step (by sensitive attribute) to reach fairness. Parameters ---------- unfs_dict : dict A dictionary containing unfairness values associated with the sequentially fair output datasets. performance_dict : dict A dictionary containing performance values associated with the sequentially fair output datasets. permutations : bool, optional If True, displays permutations of arrows based on input dictionaries. Defaults to False. base_model : bool, optional If True, includes the base model arrow. Defaults to True. final_model : bool, optional If True, includes the final model arrow. Defaults to True. Returns ------- matplotlib.figure.Figure arrows representing the fairness-performance combinations step by step (by sensitive attribute) to reach fairness. Plotting Conventions -------------------- - Arrows represent different fairness-performance combinations. - Axes are labeled for unfairness (x-axis) and performance (y-axis). Note ---- - This function uses a global variable `ax` for plotting, ensuring compatibility with external code. """ x = [] y = [] sens = [0] for i, key in enumerate(unfs_dict.keys()): x.append(unfs_dict[key]) if i != 0: sens.append(int(key[9:])) for key in performance_dict.keys(): y.append(performance_dict[key]) global ax if not permutations: fig, ax = plt.subplots() line = ax.plot(x, y, linestyle="--", alpha=0.25, color="grey")[0] for i in range(len(sens)): if (i == 0) & (base_model): line.axes.annotate(f"Base\nmodel", xytext=( x[0]+np.min(x)/20, y[0]), xy=(x[0], y[0]), size=10) ax.scatter(x[0], y[0], label="Base model", marker="^", s=100) elif i == 1: label = f"$A_{sens[i]}$-fair" line.axes.annotate(label, xytext=( x[i]+np.min(x)/20, y[i]), xy=(x[i], y[i]), size=10) ax.scatter(x[i], y[i], label=label, marker="+", s=150) elif (i == len(x)-1) & (final_model): label = f"$A_{1}$" + r"$_:$" + f"$_{i}$-fair" line.axes.annotate(label, xytext=( x[i]+np.min(x)/20, y[i]), xy=(x[i], y[i]), size=10) ax.scatter(x[i], y[i], label=label, marker="*", s=150) elif (i == 2) & (i < len(x)-1): label = f"$A_{sens[1]}$" + r"$_,$" + f"$_{sens[i]}$-fair" line.axes.annotate(label, xytext=( x[i]+np.min(x)/20, y[i]), xy=(x[i], y[i]), size=10) ax.scatter(x[i], y[i], label=label, marker="+", s=150) else: ax.scatter(x[i], y[i], marker="+", s=150, color="grey", alpha=0.4) ax.set_xlabel("Unfairness") ax.set_ylabel("Performance") ax.set_xlim((np.min(x)-np.min(x)/10-np.max(x)/10, np.max(x)+np.min(x)/10+np.max(x)/10)) ax.set_ylim((np.min(y)-np.min(y)/10-np.max(y)/10, np.max(y)+np.min(y)/10+np.max(y)/10)) ax.set_title("Exact fairness") ax.legend(loc="best") def _fair_custimized_arrow_plot(unfs_list, performance_list): """ Plot arrows representing the fairness-performance ccombinations step by step (by sensitive attribute) to reach fairness for all permutations (order of sensitive variables for which fairness is calculated). Parameters ---------- unfs_list : list A list of dictionaries containing unfairness values for each permutation of fair output datasets. performance_list : list A list of dictionaries containing performance values for each permutation of fair output datasets. Returns ------- matplotlib.figure.Figure arrows representing the fairness-performance combinations step by step (by sensitive attribute) to reach fairness for each combination. 
Plotting Conventions -------------------- - Arrows represent different fairness-performance combinations for each scenario in the input lists. - Axes are labeled for unfairness (x-axis) and performance (y-axis). Example Usage ------------- >>> arrow_plot_permutations(unfs_list, performance_list) Note ---- This function uses a global variable `ax` for plotting, ensuring compatibility with external code. """ global ax fig, ax = plt.subplots() for i in range(len(unfs_list)): if i == 0: fair_arrow_plot(unfs_list[i], performance_list[i], permutations=True, final_model=False) elif i == len(unfs_list)-1: fair_arrow_plot(unfs_list[i], performance_list[i], permutations=True, base_model=False) else: fair_arrow_plot(unfs_list[i], performance_list[i], permutations=True, base_model=False, final_model=False) def fair_multiple_arrow_plot(sensitive_features_calib, sensitive_features_test, y_calib, y_test, y_true_test, epsilon=None, test_size=0.3, permutation=True, metric=mean_squared_error): """ Plot arrows representing the fairness-performance combinations step by step (by sensitive attribute) to reach fairness for different permutations. Parameters ---------- sensitive_features_calib : numpy.ndarray Sensitive features for calibration. sensitive_features_test : numpy.ndarray Sensitive features for testing. y_calib : numpy.ndarray Predictions for calibration. y_test : numpy.ndarray Predictions for testing. y_true_test : numpy.ndarray True labels for testing. epsilon : float, optional Epsilon value for calculating Wasserstein distance. Defaults to None. test_size : float, optional Size of the testing set. Defaults to 0.3. permutation : bool, optional If True, displays permutations of arrows based on input dictionaries. Defaults to True. metric : function, optional The metric used to evaluate performance. Defaults to mean_squared_error. Returns ------- matplotlib.axes.Axes Arrows representing the fairness-performance combinations step by step (by sensitive attribute) to reach fairness for different permutations. Plotting Conventions -------------------- - Arrows represent different fairness-performance combinations for each permutation. - Axes are labeled for unfairness (x-axis) and performance (y-axis). Example Usage ------------- >>> custom_fair_arrow_plot(sensitive_features_calib, sensitive_features_test, y_calib, y_test, y_true_test) Note ---- This function uses a global variable `ax` for plotting, ensuring compatibility with external code. """ permut_y_fair_dict = calculate_perm_wasserstein( y_calib, sensitive_features_calib, y_test, sensitive_features_test, epsilon=epsilon) all_combs_sensitive_features_test = permutations_columns( sensitive_features_test) unfs_list = unfairness_permutations( permut_y_fair_dict, all_combs_sensitive_features_test)
performance_list = performance_permutations(
3
2023-12-06 14:43:41+00:00
8k
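The equipy record above quantifies unfairness through Wasserstein distances between prediction distributions across sensitive groups (`calculate_perm_wasserstein`, `unfairness_permutations`) and then plots unfairness against performance. As a loose illustration of that underlying idea, and not EquiPy's actual implementation, the sketch below measures a pairwise Wasserstein "unfairness" between group-wise predictions with scipy; the group labels and prediction values are made up.

```python
# Illustrative only: pairwise Wasserstein unfairness between sensitive groups (assumes scipy).
import numpy as np
from scipy.stats import wasserstein_distance

def group_unfairness(y_pred, groups):
    """Max pairwise Wasserstein distance between the prediction distributions of groups."""
    y_pred, groups = np.asarray(y_pred, dtype=float), np.asarray(groups)
    labels = np.unique(groups)
    dists = [
        wasserstein_distance(y_pred[groups == a], y_pred[groups == b])
        for i, a in enumerate(labels) for b in labels[i + 1:]
    ]
    return max(dists) if dists else 0.0

# Hypothetical example:
# group_unfairness([0.2, 0.8, 0.4, 0.9], ["A", "B", "A", "B"])
```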
Tlntin/booking_simulator
apps/agentfabric/appBot.py
[ { "identifier": "get_avatar_image", "path": "config_utils.py", "snippet": "def get_avatar_image(bot_avatar, uuid_str=''):\n user_avatar_path = os.path.join(\n os.path.dirname(__file__), 'assets/user.jpg')\n bot_avatar_path = os.path.join(os.path.dirname(__file__), 'assets/bot.jpg')\n if len(bot_avatar) > 0:\n bot_avatar_path = os.path.join(DEFAULT_BUILDER_CONFIG_DIR, uuid_str,\n bot_avatar)\n if uuid_str != '':\n # use default if not exists\n if not os.path.exists(bot_avatar_path):\n # create parents directory\n os.makedirs(os.path.dirname(bot_avatar_path), exist_ok=True)\n # copy the template to the address\n temp_bot_avatar_path = os.path.join(DEFAULT_BUILDER_CONFIG_DIR,\n bot_avatar)\n if not os.path.exists(temp_bot_avatar_path):\n # fall back to default local avatar image\n temp_bot_avatar_path = os.path.join('./config', bot_avatar)\n if not os.path.exists(temp_bot_avatar_path):\n temp_bot_avatar_path = os.path.join(\n './config', 'custom_bot_avatar.png')\n\n shutil.copy(temp_bot_avatar_path, bot_avatar_path)\n\n return [user_avatar_path, bot_avatar_path]" }, { "identifier": "get_ci_dir", "path": "config_utils.py", "snippet": "def get_ci_dir():\n return DEFAULT_CODE_INTERPRETER_DIR" }, { "identifier": "parse_configuration", "path": "config_utils.py", "snippet": "def parse_configuration(uuid_str=''):\n \"\"\"parse configuration\n\n Args:\n\n Returns:\n dict: parsed configuration\n\n \"\"\"\n model_cfg_file = os.getenv('MODEL_CONFIG_FILE', DEFAULT_MODEL_CONFIG_FILE)\n\n builder_cfg_file = get_user_cfg_file(uuid_str)\n # use default if not exists\n if not os.path.exists(builder_cfg_file):\n # create parents directory\n os.makedirs(os.path.dirname(builder_cfg_file), exist_ok=True)\n # copy the template to the address\n builder_cfg_file_temp = './config/builder_config.json'\n\n if builder_cfg_file_temp != builder_cfg_file:\n shutil.copy(builder_cfg_file_temp, builder_cfg_file)\n\n tool_cfg_file = os.getenv('TOOL_CONFIG_FILE', DEFAULT_TOOL_CONFIG_FILE)\n\n builder_cfg = Config.from_file(builder_cfg_file)\n model_cfg = Config.from_file(model_cfg_file)\n tool_cfg = Config.from_file(tool_cfg_file)\n\n tools_info = builder_cfg.tools\n available_tool_list = []\n for key, value in tools_info.items():\n if value['use']:\n available_tool_list.append(key)\n tool_cfg[key]['use'] = value['use']\n\n openapi_plugin_file = get_user_openapi_plugin_cfg_file(uuid_str)\n plugin_cfg = {}\n available_plugin_list = []\n if os.path.exists(openapi_plugin_file):\n openapi_plugin_cfg = Config.from_file(openapi_plugin_file)\n try:\n config_dict = openapi_schema_convert(\n schema=openapi_plugin_cfg.schema,\n auth=openapi_plugin_cfg.auth.to_dict())\n plugin_cfg = Config(config_dict)\n for name, config in config_dict.items():\n available_plugin_list.append(name)\n except Exception as e:\n error = traceback.format_exc()\n print(f'Error:{e}, with detail: {error}')\n print(\n 'Error:FormatError, with detail: The format of the plugin config file is incorrect.'\n )\n\n return builder_cfg, model_cfg, tool_cfg, available_tool_list, plugin_cfg, available_plugin_list" }, { "identifier": "ChatBot", "path": "gradio_utils.py", "snippet": "class ChatBot(ChatBotBase):\n\n def normalize_markdown(self, bot_message):\n lines = bot_message.split('\\n')\n normalized_lines = []\n inside_list = False\n\n for i, line in enumerate(lines):\n if re.match(r'^(\\d+\\.|-|\\*|\\+)\\s', line.strip()):\n if not inside_list and i > 0 and lines[i - 1].strip() != '':\n normalized_lines.append('')\n inside_list = True\n normalized_lines.append(line)\n 
elif inside_list and line.strip() == '':\n if i < len(lines) - 1 and not re.match(r'^(\\d+\\.|-|\\*|\\+)\\s',\n lines[i + 1].strip()):\n normalized_lines.append(line)\n continue\n else:\n inside_list = False\n normalized_lines.append(line)\n\n return '\\n'.join(normalized_lines)\n\n def convert_markdown(self, bot_message):\n if bot_message.count('```') % 2 != 0:\n bot_message += '\\n```'\n\n bot_message = self.normalize_markdown(bot_message)\n\n result = markdown.markdown(\n bot_message,\n extensions=[\n 'toc', 'extra', 'tables', 'markdown_katex', 'codehilite',\n 'markdown_cjk_spacing.cjk_spacing', 'pymdownx.magiclink'\n ],\n extension_configs={\n 'markdown_katex': {\n 'no_inline_svg': True, # fix for WeasyPrint\n 'insert_fonts_css': True,\n },\n 'codehilite': {\n 'linenums': False,\n 'guess_lang': True\n },\n 'mdx_truly_sane_lists': {\n 'nested_indent': 2,\n 'truly_sane': True,\n }\n })\n result = ''.join(result)\n return result\n\n @staticmethod\n def prompt_parse(message):\n output = ''\n if 'Thought' in message:\n if 'Action' in message or 'Action Input:' in message:\n re_pattern_thought = re.compile(\n pattern=r'([\\s\\S]+)Thought:([\\s\\S]+)Action:')\n\n res = re_pattern_thought.search(message)\n\n if res is None:\n re_pattern_thought_only = re.compile(\n pattern=r'Thought:([\\s\\S]+)Action:')\n res = re_pattern_thought_only.search(message)\n llm_result = ''\n else:\n llm_result = res.group(1).strip()\n action_thought_result = res.group(2).strip()\n\n re_pattern_action = re.compile(\n pattern=\n r'Action:([\\s\\S]+)Action Input:([\\s\\S]+)<\\|startofexec\\|>')\n res = re_pattern_action.search(message)\n if res is None:\n action, action_parameters = MRKLOutputParser(\n ).parse_response(message)\n else:\n action = res.group(1).strip()\n action_parameters = res.group(2)\n action_result = json.dumps({\n 'api_name': action,\n 'parameters': action_parameters\n })\n output += f'{llm_result}\\n{action_thought_result}\\n<|startofthink|>\\n{action_result}\\n<|endofthink|>\\n'\n if '<|startofexec|>' in message:\n re_pattern3 = re.compile(\n pattern=r'<\\|startofexec\\|>([\\s\\S]+)<\\|endofexec\\|>')\n res3 = re_pattern3.search(message)\n observation = res3.group(1).strip()\n output += f'\\n<|startofexec|>\\n{observation}\\n<|endofexec|>\\n'\n if 'Final Answer' in message:\n re_pattern2 = re.compile(\n pattern=r'Thought:([\\s\\S]+)Final Answer:([\\s\\S]+)')\n res2 = re_pattern2.search(message)\n # final_thought_result = res2.group(1).strip()\n final_answer_result = res2.group(2).strip()\n output += f'{final_answer_result}\\n'\n\n if output == '':\n return message\n print(output)\n return output\n else:\n return message\n\n def convert_bot_message(self, bot_message):\n\n bot_message = ChatBot.prompt_parse(bot_message)\n # print('processed bot message----------')\n # print(bot_message)\n # print('processed bot message done')\n start_pos = 0\n result = ''\n find_json_pattern = re.compile(r'{[\\s\\S]+}')\n START_OF_THINK_TAG, END_OF_THINK_TAG = '<|startofthink|>', '<|endofthink|>'\n START_OF_EXEC_TAG, END_OF_EXEC_TAG = '<|startofexec|>', '<|endofexec|>'\n while start_pos < len(bot_message):\n try:\n start_of_think_pos = bot_message.index(START_OF_THINK_TAG,\n start_pos)\n end_of_think_pos = bot_message.index(END_OF_THINK_TAG,\n start_pos)\n if start_pos < start_of_think_pos:\n result += self.convert_markdown(\n bot_message[start_pos:start_of_think_pos])\n think_content = bot_message[start_of_think_pos\n + len(START_OF_THINK_TAG\n ):end_of_think_pos].strip()\n json_content = 
find_json_pattern.search(think_content)\n think_content = json_content.group(\n ) if json_content else think_content\n try:\n think_node = json.loads(think_content)\n plugin_name = think_node.get(\n 'plugin_name',\n think_node.get('plugin',\n think_node.get('api_name', 'unknown')))\n summary = f'选择插件【{plugin_name}】,调用处理中...'\n del think_node['url']\n # think_node.pop('url', None)\n\n detail = f'```json\\n\\n{json.dumps(think_node, indent=3, ensure_ascii=False)}\\n\\n```'\n except Exception:\n summary = '思考中...'\n detail = think_content\n # traceback.print_exc()\n # detail += traceback.format_exc()\n result += '<details> <summary>' + summary + '</summary>' + self.convert_markdown(\n detail) + '</details>'\n # print(f'detail:{detail}')\n start_pos = end_of_think_pos + len(END_OF_THINK_TAG)\n except Exception:\n # result += traceback.format_exc()\n break\n # continue\n\n try:\n start_of_exec_pos = bot_message.index(START_OF_EXEC_TAG,\n start_pos)\n end_of_exec_pos = bot_message.index(END_OF_EXEC_TAG, start_pos)\n # print(start_of_exec_pos)\n # print(end_of_exec_pos)\n # print(bot_message[start_of_exec_pos:end_of_exec_pos])\n # print('------------------------')\n if start_pos < start_of_exec_pos:\n result += self.convert_markdown(\n bot_message[start_pos:start_of_think_pos])\n exec_content = bot_message[start_of_exec_pos\n + len(START_OF_EXEC_TAG\n ):end_of_exec_pos].strip()\n try:\n summary = '完成插件调用.'\n detail = f'```json\\n\\n{exec_content}\\n\\n```'\n except Exception:\n pass\n\n result += '<details> <summary>' + summary + '</summary>' + self.convert_markdown(\n detail) + '</details>'\n\n start_pos = end_of_exec_pos + len(END_OF_EXEC_TAG)\n except Exception:\n # result += traceback.format_exc()\n continue\n if start_pos < len(bot_message):\n result += self.convert_markdown(bot_message[start_pos:])\n result += ALREADY_CONVERTED_MARK\n return result\n\n def convert_bot_message_for_qwen(self, bot_message):\n\n start_pos = 0\n result = ''\n find_json_pattern = re.compile(r'{[\\s\\S]+}')\n ACTION = 'Action:'\n ACTION_INPUT = 'Action Input'\n OBSERVATION = 'Observation'\n RESULT_START = '<result>'\n RESULT_END = '</result>'\n while start_pos < len(bot_message):\n try:\n action_pos = bot_message.index(ACTION, start_pos)\n action_input_pos = bot_message.index(ACTION_INPUT, start_pos)\n result += self.convert_markdown(\n bot_message[start_pos:action_pos])\n # Action: image_gen\n # Action Input\n # {\"text\": \"金庸武侠 世界\", \"resolution\": \"1280x720\"}\n # Observation: <result>![IMAGEGEN](https://dashscope-result-sh.oss-cn-shanghai.aliyuncs.com/1d/e9/20231116/723609ee/d046d2d9-0c95-420b-9467-f0e831f5e2b7-1.png?Expires=1700227460&OSSAccessKeyId=LTAI5tQZd8AEcZX6KZV4G8qL&Signature=R0PlEazQF9uBD%2Fh9tkzOkJMGyg8%3D)<result> # noqa E501\n action_name = bot_message[action_pos\n + len(ACTION\n ):action_input_pos].strip()\n # action_start action_end 使用 Action Input 到 Observation 之间\n action_input_end = bot_message[action_input_pos:].index(\n OBSERVATION) - 1\n action_input = bot_message[action_input_pos:action_input_pos\n + action_input_end].strip()\n is_json = find_json_pattern.search(action_input)\n if is_json:\n action_input = is_json.group()\n else:\n action_input = re.sub(r'^Action Input[:]?[\\s]*', '',\n action_input)\n\n summary = f'调用工具 {action_name}'\n if is_json:\n detail = f'```json\\n\\n{json.dumps(json.loads(action_input), indent=4, ensure_ascii=False)}\\n\\n```'\n else:\n detail = action_input\n result += '<details> <summary>' + summary + '</summary>' + self.convert_markdown(\n detail) + 
'</details>'\n start_pos = action_input_pos + action_input_end + 1\n try:\n observation_pos = bot_message.index(OBSERVATION, start_pos)\n idx = observation_pos + len(OBSERVATION)\n obs_message = bot_message[idx:]\n observation_start_id = obs_message.index(\n RESULT_START) + len(RESULT_START)\n observation_end_idx = obs_message.index(RESULT_END)\n summary = '完成调用'\n exec_content = obs_message[\n observation_start_id:observation_end_idx]\n detail = f'```\\n\\n{exec_content}\\n\\n```'\n start_pos = idx + observation_end_idx + len(RESULT_END)\n except Exception:\n summary = '执行中...'\n detail = ''\n exec_content = None\n\n result += '<details> <summary>' + summary + '</summary>' + self.convert_markdown(\n detail) + '</details>'\n if exec_content is not None and '[IMAGEGEN]' in exec_content:\n # convert local file to base64\n re_pattern = re.compile(pattern=r'!\\[[^\\]]+\\]\\(([^)]+)\\)')\n res = re_pattern.search(exec_content)\n if res:\n image_path = res.group(1).strip()\n if os.path.isfile(image_path):\n exec_content = convert_url(\n exec_content,\n covert_image_to_base64(image_path))\n result += self.convert_markdown(f'{exec_content}')\n\n except Exception:\n # import traceback; traceback.print_exc()\n result += self.convert_markdown(bot_message[start_pos:])\n start_pos = len(bot_message[start_pos:])\n break\n\n result += ALREADY_CONVERTED_MARK\n return result\n\n def postprocess(\n self,\n message_pairs: list[list[str | tuple[str] | tuple[str, str] | None]\n | tuple],\n ) -> list[list[str | dict | None]]:\n \"\"\"\n Parameters:\n message_pairs: List of lists representing the message and response pairs.\n Each message and response should be a string, which may be in Markdown format.\n It can also be a tuple whose first element is a string or pathlib.\n Path filepath or URL to an image/video/audio, and second (optional) element is the alt text,\n in which case the media file is displayed. It can also be None, in which case that message is not displayed.\n Returns:\n List of lists representing the message and response. Each message and response will be a string of HTML,\n or a dictionary with media information. Or None if the message is not to be displayed.\n \"\"\"\n if message_pairs is None:\n return []\n processed_messages = []\n for message_pair in message_pairs:\n assert isinstance(\n message_pair, (tuple, list)\n ), f'Expected a list of lists or list of tuples. Received: {message_pair}'\n assert (\n len(message_pair) == 2\n ), f'Expected a list of lists of length 2 or list of tuples of length 2. 
Received: {message_pair}'\n if isinstance(message_pair[0], tuple) or isinstance(\n message_pair[1], tuple):\n processed_messages.append([\n self._postprocess_chat_messages(message_pair[0]),\n self._postprocess_chat_messages(message_pair[1]),\n ])\n else:\n # 处理不是元组的情况\n user_message, bot_message = message_pair\n\n if user_message and not user_message.endswith(\n ALREADY_CONVERTED_MARK):\n convert_md = self.convert_markdown(\n html.escape(user_message))\n user_message = f'{convert_md}' + ALREADY_CONVERTED_MARK\n if bot_message and not bot_message.endswith(\n ALREADY_CONVERTED_MARK):\n # bot_message = self.convert_bot_message(bot_message)\n bot_message = self.convert_bot_message_for_qwen(\n bot_message)\n processed_messages.append([\n user_message,\n bot_message,\n ])\n\n return processed_messages" }, { "identifier": "format_cover_html", "path": "gradio_utils.py", "snippet": "def format_cover_html(configuration, bot_avatar_path):\n if bot_avatar_path:\n image_src = covert_image_to_base64(bot_avatar_path)\n else:\n image_src = '//img.alicdn.com/imgextra/i3/O1CN01YPqZFO1YNZerQfSBk_!!6000000003047-0-tps-225-225.jpg'\n return f\"\"\"\n<div class=\"bot_cover\">\n <div class=\"bot_avatar\">\n <img src={image_src} />\n </div>\n <div class=\"bot_name\">{configuration.get(\"name\", \"\")}</div>\n <div class=\"bot_desp\">{configuration.get(\"description\", \"\")}</div>\n</div>\n\"\"\"" }, { "identifier": "init_user_chatbot_agent", "path": "user_core.py", "snippet": "def init_user_chatbot_agent(uuid_str=''):\n builder_cfg, model_cfg, tool_cfg, available_tool_list, plugin_cfg, available_plugin_list = parse_configuration(\n uuid_str)\n # set top_p and stop_words for role play\n model_cfg[builder_cfg.model]['generate_cfg']['top_p'] = 0.3\n # model_cfg[builder_cfg.model]['generate_cfg']['top_k'] = 2\n model_cfg[builder_cfg.model]['generate_cfg']['temperature'] = 0.3\n model_cfg[builder_cfg.model]['generate_cfg']['seed'] = 2222\n model_cfg[builder_cfg.model]['generate_cfg']['stop'] = 'Observation'\n\n # build model\n print(f'using model {builder_cfg.model}')\n print(f'model config {model_cfg[builder_cfg.model]}')\n\n # # check configuration\n # if builder_cfg.model in ['qwen-max', 'qwen-72b-api', 'qwen-14b-api', 'qwen-plus']:\n # if 'DASHSCOPE_API_KEY' not in os.environ:\n # raise gr.Error('DASHSCOPE_API_KEY should be set via setting environment variable')\n\n try:\n llm = LLMFactory.build_llm(builder_cfg.model, model_cfg)\n except Exception as e:\n raise gr.Error(str(e))\n\n # build prompt with zero shot react template\n instruction_template = parse_role_config(builder_cfg)\n prompt_generator = CustomPromptGenerator(\n system_template=DEFAULT_SYSTEM_TEMPLATE,\n user_template=DEFAULT_USER_TEMPLATE,\n exec_template=DEFAULT_EXEC_TEMPLATE,\n instruction_template=instruction_template,\n add_addition_round=True,\n addition_assistant_reply='好的。',\n knowledge_file_name=os.path.basename(builder_cfg.knowledge[0] if len(\n builder_cfg.knowledge) > 0 else ''),\n llm=llm,\n uuid_str=uuid_str)\n\n # get knowledge\n # 开源版本的向量库配置\n model_id = 'damo/nlp_gte_sentence-embedding_chinese-base'\n embeddings = ModelScopeEmbeddings(model_id=model_id)\n available_knowledge_list = []\n for item in builder_cfg.knowledge:\n # if isfile and end with .txt, .md, .pdf, support only those file\n if os.path.isfile(item) and item.endswith(('.txt', '.md', '.pdf')):\n available_knowledge_list.append(item)\n if len(available_knowledge_list) > 0:\n knowledge_retrieval = KnowledgeRetrieval.from_file(\n available_knowledge_list, embeddings, 
FAISS)\n else:\n knowledge_retrieval = None\n\n additional_tool_list = add_openapi_plugin_to_additional_tool(\n plugin_cfg, available_plugin_list)\n # build agent\n agent = AgentExecutor(\n llm,\n additional_tool_list=additional_tool_list,\n tool_cfg=tool_cfg,\n agent_type=AgentType.MRKL,\n prompt_generator=prompt_generator,\n knowledge_retrieval=knowledge_retrieval,\n tool_retrieval=False)\n agent.set_available_tools(available_tool_list + available_plugin_list)\n return agent" } ]
import os
import random
import shutil
import sys
import traceback
import gradio as gr
from config_utils import get_avatar_image, get_ci_dir, parse_configuration
from gradio_utils import ChatBot, format_cover_html
from user_core import init_user_chatbot_agent
5,869
uuid_str = 'local_user' builder_cfg, model_cfg, tool_cfg, available_tool_list, _, _ = parse_configuration( uuid_str) suggests = builder_cfg.get('prompt_recommend', []) avatar_pairs = get_avatar_image(builder_cfg.get('avatar', ''), uuid_str) customTheme = gr.themes.Default( primary_hue=gr.themes.utils.colors.blue, radius_size=gr.themes.utils.sizes.radius_none, ) def check_uuid(uuid_str): if not uuid_str or uuid_str == '': if os.getenv('MODELSCOPE_ENVIRONMENT') == 'studio': raise gr.Error('请登陆后使用! (Please login first)') else: uuid_str = 'local_user' return uuid_str def init_user(state): try: seed = state.get('session_seed', random.randint(0, 1000000000)) user_agent = init_user_chatbot_agent(uuid_str) user_agent.seed = seed state['user_agent'] = user_agent except Exception as e: error = traceback.format_exc() print(f'Error:{e}, with detail: {error}') return state # 创建 Gradio 界面 demo = gr.Blocks(css='assets/appBot.css', theme=customTheme) with demo: gr.Markdown( '# <center> \N{fire} AgentFabric powered by Modelscope-agent ([github star](https://github.com/modelscope/modelscope-agent/tree/main))</center>' # noqa E501 ) draw_seed = random.randint(0, 1000000000) state = gr.State({'session_seed': draw_seed}) with gr.Row(elem_classes='container'): with gr.Column(scale=4): with gr.Column(): # Preview user_chatbot = ChatBot( value=[[None, '尝试问我一点什么吧~']], elem_id='user_chatbot', elem_classes=['markdown-body'], avatar_images=avatar_pairs, height=600, latex_delimiters=[], show_label=False) with gr.Row(): with gr.Column(scale=12): preview_chat_input = gr.Textbox( show_label=False, container=False, placeholder='跟我聊聊吧~') with gr.Column(min_width=70, scale=1): upload_button = gr.UploadButton( '上传', file_types=[ '.csv', '.doc', '.docx', '.xls', '.xlsx', '.txt', '.md', '.pdf', '.jpeg', '.png', '.jpg', '.gif' ], file_count='multiple') with gr.Column(min_width=70, scale=1): preview_send_button = gr.Button('发送', variant='primary') with gr.Column(scale=1): user_chat_bot_cover = gr.HTML( format_cover_html(builder_cfg, avatar_pairs[1])) user_chat_bot_suggest = gr.Examples( label='Prompt Suggestions', examples=suggests, inputs=[preview_chat_input]) def upload_file(chatbot, upload_button, _state): _uuid_str = check_uuid(uuid_str) new_file_paths = [] if 'file_paths' in _state: file_paths = _state['file_paths'] else: file_paths = [] for file in upload_button: file_name = os.path.basename(file.name) # covert xxx.json to xxx_uuid_str.json file_name = file_name.replace('.', f'_{_uuid_str}.')
uuid_str = 'local_user' builder_cfg, model_cfg, tool_cfg, available_tool_list, _, _ = parse_configuration( uuid_str) suggests = builder_cfg.get('prompt_recommend', []) avatar_pairs = get_avatar_image(builder_cfg.get('avatar', ''), uuid_str) customTheme = gr.themes.Default( primary_hue=gr.themes.utils.colors.blue, radius_size=gr.themes.utils.sizes.radius_none, ) def check_uuid(uuid_str): if not uuid_str or uuid_str == '': if os.getenv('MODELSCOPE_ENVIRONMENT') == 'studio': raise gr.Error('请登陆后使用! (Please login first)') else: uuid_str = 'local_user' return uuid_str def init_user(state): try: seed = state.get('session_seed', random.randint(0, 1000000000)) user_agent = init_user_chatbot_agent(uuid_str) user_agent.seed = seed state['user_agent'] = user_agent except Exception as e: error = traceback.format_exc() print(f'Error:{e}, with detail: {error}') return state # 创建 Gradio 界面 demo = gr.Blocks(css='assets/appBot.css', theme=customTheme) with demo: gr.Markdown( '# <center> \N{fire} AgentFabric powered by Modelscope-agent ([github star](https://github.com/modelscope/modelscope-agent/tree/main))</center>' # noqa E501 ) draw_seed = random.randint(0, 1000000000) state = gr.State({'session_seed': draw_seed}) with gr.Row(elem_classes='container'): with gr.Column(scale=4): with gr.Column(): # Preview user_chatbot = ChatBot( value=[[None, '尝试问我一点什么吧~']], elem_id='user_chatbot', elem_classes=['markdown-body'], avatar_images=avatar_pairs, height=600, latex_delimiters=[], show_label=False) with gr.Row(): with gr.Column(scale=12): preview_chat_input = gr.Textbox( show_label=False, container=False, placeholder='跟我聊聊吧~') with gr.Column(min_width=70, scale=1): upload_button = gr.UploadButton( '上传', file_types=[ '.csv', '.doc', '.docx', '.xls', '.xlsx', '.txt', '.md', '.pdf', '.jpeg', '.png', '.jpg', '.gif' ], file_count='multiple') with gr.Column(min_width=70, scale=1): preview_send_button = gr.Button('发送', variant='primary') with gr.Column(scale=1): user_chat_bot_cover = gr.HTML( format_cover_html(builder_cfg, avatar_pairs[1])) user_chat_bot_suggest = gr.Examples( label='Prompt Suggestions', examples=suggests, inputs=[preview_chat_input]) def upload_file(chatbot, upload_button, _state): _uuid_str = check_uuid(uuid_str) new_file_paths = [] if 'file_paths' in _state: file_paths = _state['file_paths'] else: file_paths = [] for file in upload_button: file_name = os.path.basename(file.name) # covert xxx.json to xxx_uuid_str.json file_name = file_name.replace('.', f'_{_uuid_str}.')
file_path = os.path.join(get_ci_dir(), file_name)
1
2023-12-12 04:24:00+00:00
8k
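In the AgentFabric record above, `format_cover_html` embeds the bot avatar through a `covert_image_to_base64` helper whose body is not included. A generic sketch of that common pattern (reading a local image and producing a base64 data URI for inline HTML) might look like the following; the function name and usage are illustrative, not the project's code, though `assets/bot.jpg` does appear in the record as the default avatar path.

```python
# Generic illustration of embedding a local image as a data URI (not the project's helper).
import base64
import mimetypes

def image_to_data_uri(path):
    """Return a data: URI suitable for an <img src=...> attribute."""
    mime, _ = mimetypes.guess_type(path)
    with open(path, "rb") as f:
        encoded = base64.b64encode(f.read()).decode("ascii")
    return f"data:{mime or 'image/png'};base64,{encoded}"

# Hypothetical usage:
# html = f'<img src="{image_to_data_uri("assets/bot.jpg")}" />'
```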
finned-tech/sportsbookreview-scraper
cli.py
[ { "identifier": "NFLOddsScraper", "path": "scrapers/sportsbookreview.py", "snippet": "class NFLOddsScraper(OddsScraper):\n def __init__(self, years):\n super().__init__(\"nfl\", years)\n self.base = (\n \"https://www.sportsbookreviewsonline.com/scoresoddsarchives/nfl-odds-\"\n )\n self.schema = {\n \"season\": [],\n \"date\": [],\n \"home_team\": [],\n \"away_team\": [],\n \"home_1stQtr\": [],\n \"away_1stQtr\": [],\n \"home_2ndQtr\": [],\n \"away_2ndQtr\": [],\n \"home_3rdQtr\": [],\n \"away_3rdQtr\": [],\n \"home_4thQtr\": [],\n \"away_4thQtr\": [],\n \"home_final\": [],\n \"away_final\": [],\n \"home_close_ml\": [],\n \"away_close_ml\": [],\n \"home_open_spread\": [],\n \"away_open_spread\": [],\n \"home_close_spread\": [],\n \"away_close_spread\": [],\n \"home_2H_spread\": [],\n \"away_2H_spread\": [],\n \"2H_total\": [],\n \"open_over_under\": [],\n \"close_over_under\": [],\n }\n\n def _reformat_data(self, df, season):\n new_df = pd.DataFrame()\n new_df[\"season\"] = [season] * len(df)\n new_df[\"date\"] = df[0].apply(lambda x: self._make_datestr(x, season))\n new_df[\"name\"] = df[3]\n new_df[\"1stQtr\"] = df[4]\n new_df[\"2ndQtr\"] = df[5]\n new_df[\"3rdQtr\"] = df[6]\n new_df[\"4thQtr\"] = df[7]\n new_df[\"final\"] = df[8]\n _open = df[9].apply(lambda x: 0 if x in self.blacklist else x)\n new_df[\"open_odds\"] = _open\n close = df[10].apply(lambda x: 0 if x in self.blacklist else x)\n new_df[\"close_odds\"] = close\n new_df[\"close_ml\"] = df[11]\n h2 = df[12].apply(lambda x: 0 if x in self.blacklist else x)\n new_df[\"2H_odds\"] = h2\n return new_df\n\n def _to_schema(self, df):\n new_df = self.schema.copy()\n df = df.fillna(0)\n progress = df.iterrows()\n for (i1, row), (i2, next_row) in self._pairwise(progress):\n home_ml = int(next_row[\"close_ml\"])\n away_ml = int(row[\"close_ml\"])\n\n odds1 = float(row[\"open_odds\"])\n odds2 = float(next_row[\"open_odds\"])\n if odds1 < odds2:\n open_spread = odds1\n close_spread = float(row[\"close_odds\"])\n h2_spread = float(row[\"2H_odds\"])\n\n h2_total = float(next_row[\"2H_odds\"])\n open_ou = odds2\n close_ou = float(next_row[\"close_odds\"])\n else:\n open_spread = odds2\n close_spread = float(next_row[\"close_odds\"])\n h2_spread = float(next_row[\"2H_odds\"])\n\n h2_total = float(row[\"2H_odds\"])\n open_ou = odds1\n close_ou = float(row[\"close_odds\"])\n\n home_open_spread = -open_spread if home_ml < away_ml else open_spread\n away_open_spread = -home_open_spread\n home_close_spread = -close_spread if home_ml < away_ml else close_spread\n away_close_spread = -home_close_spread\n h2_home_spread = -h2_spread if home_ml < away_ml else h2_spread\n h2_away_spread = -h2_home_spread\n\n new_df[\"season\"].append(row[\"season\"])\n new_df[\"date\"].append(row[\"date\"])\n new_df[\"home_team\"].append(self._translate(next_row[\"name\"]))\n new_df[\"away_team\"].append(self._translate(row[\"name\"]))\n new_df[\"home_1stQtr\"].append(next_row[\"1stQtr\"])\n new_df[\"away_1stQtr\"].append(row[\"1stQtr\"])\n new_df[\"home_2ndQtr\"].append(next_row[\"2ndQtr\"])\n new_df[\"away_2ndQtr\"].append(row[\"2ndQtr\"])\n new_df[\"home_3rdQtr\"].append(next_row[\"3rdQtr\"])\n new_df[\"away_3rdQtr\"].append(row[\"3rdQtr\"])\n new_df[\"home_4thQtr\"].append(next_row[\"4thQtr\"])\n new_df[\"away_4thQtr\"].append(row[\"4thQtr\"])\n new_df[\"home_final\"].append(next_row[\"final\"])\n new_df[\"away_final\"].append(row[\"final\"])\n new_df[\"home_close_ml\"].append(home_ml)\n new_df[\"away_close_ml\"].append(away_ml)\n 
new_df[\"home_open_spread\"].append(home_open_spread)\n new_df[\"away_open_spread\"].append(away_open_spread)\n new_df[\"home_close_spread\"].append(home_close_spread)\n new_df[\"away_close_spread\"].append(away_close_spread)\n new_df[\"home_2H_spread\"].append(h2_home_spread)\n new_df[\"away_2H_spread\"].append(h2_away_spread)\n new_df[\"2H_total\"].append(h2_total)\n new_df[\"open_over_under\"].append(open_ou)\n new_df[\"close_over_under\"].append(close_ou)\n\n return pd.DataFrame(new_df)" }, { "identifier": "NBAOddsScraper", "path": "scrapers/sportsbookreview.py", "snippet": "class NBAOddsScraper(NFLOddsScraper):\n def __init__(self, years):\n super().__init__(years)\n self.sport = \"nba\"\n self.base = (\n \"https://www.sportsbookreviewsonline.com/scoresoddsarchives/nba-odds-\"\n )\n self.schema = {\n \"season\": [],\n \"date\": [],\n \"home_team\": [],\n \"away_team\": [],\n \"home_1stQtr\": [],\n \"away_1stQtr\": [],\n \"home_2ndQtr\": [],\n \"away_2ndQtr\": [],\n \"home_3rdQtr\": [],\n \"away_3rdQtr\": [],\n \"home_4thQtr\": [],\n \"away_4thQtr\": [],\n \"home_final\": [],\n \"away_final\": [],\n \"home_close_ml\": [],\n \"away_close_ml\": [],\n \"home_open_spread\": [],\n \"away_open_spread\": [],\n \"home_close_spread\": [],\n \"away_close_spread\": [],\n \"home_2H_spread\": [],\n \"away_2H_spread\": [],\n \"2H_total\": [],\n \"open_over_under\": [],\n \"close_over_under\": [],\n }" }, { "identifier": "NHLOddsScraper", "path": "scrapers/sportsbookreview.py", "snippet": "class NHLOddsScraper(OddsScraper):\n def __init__(self, years):\n super().__init__(\"nhl\", years)\n self.base = (\n \"https://www.sportsbookreviewsonline.com/scoresoddsarchives/nhl-odds-\"\n )\n self.schema = {\n \"season\": [],\n \"date\": [],\n \"home_team\": [],\n \"away_team\": [],\n \"home_1stPeriod\": [],\n \"away_1stPeriod\": [],\n \"home_2ndPeriod\": [],\n \"away_2ndPeriod\": [],\n \"home_3rdPeriod\": [],\n \"away_3rdPeriod\": [],\n \"home_final\": [],\n \"away_final\": [],\n \"home_open_ml\": [],\n \"away_open_ml\": [],\n \"home_close_ml\": [],\n \"away_close_ml\": [],\n \"home_close_spread\": [],\n \"away_close_spread\": [],\n \"home_close_spread_odds\": [],\n \"away_close_spread_odds\": [],\n \"open_over_under\": [],\n \"open_over_under_odds\": [],\n \"close_over_under\": [],\n \"close_over_under_odds\": [],\n }\n\n def _reformat_data(self, df, season, covid=False):\n new_df = pd.DataFrame()\n new_df[\"season\"] = [season] * len(df)\n new_df[\"date\"] = df[0].apply(\n lambda x: self._make_datestr(x, season)\n if not covid\n else self._make_datestr(x, season, start=1, yr_end=3)\n )\n new_df[\"name\"] = df[3]\n new_df[\"1stPeriod\"] = df[4]\n new_df[\"2ndPeriod\"] = df[5]\n new_df[\"3rdPeriod\"] = df[6]\n new_df[\"final\"] = df[7]\n new_df[\"open_ml\"] = df[8]\n new_df[\"open_ml\"] = new_df[\"open_ml\"].apply(\n lambda x: 0 if x in self.blacklist else x\n )\n new_df[\"close_ml\"] = df[9]\n new_df[\"close_ml\"] = new_df[\"close_ml\"].apply(\n lambda x: 0 if x in self.blacklist else x\n )\n new_df[\"close_spread\"] = df[10] if season > 2013 else 0\n new_df[\"close_spread\"] = new_df[\"close_spread\"].apply(\n lambda x: 0 if x in self.blacklist else float(x)\n )\n new_df[\"close_spread_odds\"] = df[11] if season > 2013 else 0\n new_df[\"close_spread_odds\"] = new_df[\"close_spread_odds\"].apply(\n lambda x: 0 if x in self.blacklist else float(x)\n )\n new_df[\"open_over_under\"] = df[12] if season > 2013 else df[10]\n new_df[\"open_over_under\"] = new_df[\"open_over_under\"].apply(\n lambda x: 0 if x in 
self.blacklist else float(x)\n )\n new_df[\"open_over_under_odds\"] = df[13] if season > 2013 else df[11]\n new_df[\"open_over_under_odds\"] = new_df[\"open_over_under_odds\"].apply(\n lambda x: 0 if x in self.blacklist else float(x)\n )\n new_df[\"close_over_under\"] = df[14] if season > 2013 else df[12]\n new_df[\"close_over_under\"] = new_df[\"close_over_under\"].apply(\n lambda x: 0 if x in self.blacklist else float(x)\n )\n new_df[\"close_over_under_odds\"] = df[15] if season > 2013 else df[13]\n new_df[\"close_over_under_odds\"] = new_df[\"close_over_under_odds\"].apply(\n lambda x: 0 if x in self.blacklist else float(x)\n )\n\n return new_df\n\n def _to_schema(self, df):\n new_df = self.schema.copy()\n df = df.fillna(0)\n progress = df.iterrows()\n for (i1, row), (i2, next_row) in self._pairwise(progress):\n new_df[\"season\"].append(row[\"season\"])\n new_df[\"date\"].append(row[\"date\"])\n new_df[\"home_team\"].append(self._translate(next_row[\"name\"]))\n new_df[\"away_team\"].append(self._translate(row[\"name\"]))\n new_df[\"home_1stPeriod\"].append(next_row[\"1stPeriod\"])\n new_df[\"away_1stPeriod\"].append(row[\"1stPeriod\"])\n new_df[\"home_2ndPeriod\"].append(next_row[\"2ndPeriod\"])\n new_df[\"away_2ndPeriod\"].append(row[\"2ndPeriod\"])\n new_df[\"home_3rdPeriod\"].append(next_row[\"3rdPeriod\"])\n new_df[\"away_3rdPeriod\"].append(row[\"3rdPeriod\"])\n new_df[\"home_final\"].append(next_row[\"final\"])\n new_df[\"away_final\"].append(row[\"final\"])\n new_df[\"home_open_ml\"].append(int(next_row[\"open_ml\"]))\n new_df[\"away_open_ml\"].append(int(row[\"open_ml\"]))\n new_df[\"home_close_ml\"].append(int(next_row[\"close_ml\"]))\n new_df[\"away_close_ml\"].append(int(row[\"close_ml\"]))\n new_df[\"home_close_spread\"].append(next_row[\"close_spread\"])\n new_df[\"away_close_spread\"].append(row[\"close_spread\"])\n new_df[\"home_close_spread_odds\"].append(next_row[\"close_spread_odds\"])\n new_df[\"away_close_spread_odds\"].append(row[\"close_spread_odds\"])\n new_df[\"open_over_under\"].append(next_row[\"open_over_under\"])\n new_df[\"open_over_under_odds\"].append(next_row[\"open_over_under_odds\"])\n new_df[\"close_over_under\"].append(next_row[\"close_over_under\"])\n new_df[\"close_over_under_odds\"].append(next_row[\"close_over_under_odds\"])\n\n return pd.DataFrame(new_df)\n\n def driver(self):\n dfs = pd.DataFrame()\n for season in self.seasons:\n # compensate for the COVID shortened season in 2021\n season_str = self._make_season(season) if season != 2020 else \"2021\"\n is_cov = True if season == 2020 else False\n url = self.base + season_str\n\n # Sportsbookreview has scraper protection, so we need to set a user agent\n # to get around this.\n headers = {\"User-Agent\": \"Mozilla/5.0\"}\n r = requests.get(url, headers=headers)\n\n dfs = pd.concat(\n [dfs, self._reformat_data(pd.read_html(r.text)[0][1:], season, is_cov)],\n axis=0,\n )\n\n return self._to_schema(dfs)" }, { "identifier": "MLBOddsScraper", "path": "scrapers/sportsbookreview.py", "snippet": "class MLBOddsScraper(OddsScraper):\n def __init__(self, years):\n super().__init__(\"mlb\", years)\n self.base = \"https://www.sportsbookreviewsonline.com/wp-content/uploads/sportsbookreviewsonline_com_737/mlb-odds-\"\n self.ext = \".xlsx\"\n self.schema = {\n \"season\": [],\n \"date\": [],\n \"home_team\": [],\n \"away_team\": [],\n \"home_1stInn\": [],\n \"away_1stInn\": [],\n \"home_2ndInn\": [],\n \"away_2ndInn\": [],\n \"home_3rdInn\": [],\n \"away_3rdInn\": [],\n \"home_4thInn\": [],\n 
\"away_4thInn\": [],\n \"home_5thInn\": [],\n \"away_5thInn\": [],\n \"home_6thInn\": [],\n \"away_6thInn\": [],\n \"home_7thInn\": [],\n \"away_7thInn\": [],\n \"home_8thInn\": [],\n \"away_8thInn\": [],\n \"home_9thInn\": [],\n \"away_9thInn\": [],\n \"home_final\": [],\n \"away_final\": [],\n \"home_open_ml\": [],\n \"away_open_ml\": [],\n \"home_close_ml\": [],\n \"away_close_ml\": [],\n \"home_close_spread\": [],\n \"away_close_spread\": [],\n \"home_close_spread_odds\": [],\n \"away_close_spread_odds\": [],\n \"open_over_under\": [],\n \"open_over_under_odds\": [],\n \"close_over_under\": [],\n \"close_over_under_odds\": [],\n }\n\n def _reformat_data(self, df, season):\n new_df = pd.DataFrame()\n new_df[\"season\"] = [season] * len(df)\n new_df[\"date\"] = df[0].apply(\n lambda x: self._make_datestr(x, season, start=3, yr_end=10)\n )\n new_df[\"name\"] = df[3]\n new_df[\"1stInn\"] = df[5]\n new_df[\"2ndInn\"] = df[6]\n new_df[\"3rdInn\"] = df[7]\n new_df[\"4thInn\"] = df[8]\n new_df[\"5thInn\"] = df[9]\n new_df[\"6thInn\"] = df[10]\n new_df[\"7thInn\"] = df[11]\n new_df[\"8thInn\"] = df[12]\n new_df[\"9thInn\"] = df[13]\n new_df[\"final\"] = df[14]\n new_df[\"open_ml\"] = df[15]\n new_df[\"close_ml\"] = df[16]\n new_df[\"close_spread\"] = df[17] if season > 2013 else 0\n new_df[\"close_spread_odds\"] = df[18] if season > 2013 else 0\n new_df[\"open_over_under\"] = df[19] if season > 2013 else df[17]\n new_df[\"open_over_under_odds\"] = df[20] if season > 2013 else df[18]\n new_df[\"close_over_under\"] = df[21] if season > 2013 else df[19]\n new_df[\"close_over_under_odds\"] = df[22] if season > 2013 else df[20]\n\n return new_df\n\n def _to_schema(self, df):\n new_df = self.schema.copy()\n progress = df.iterrows()\n for (i1, row), (i2, next_row) in self._pairwise(progress):\n new_df[\"season\"].append(row[\"season\"])\n new_df[\"date\"].append(row[\"date\"])\n new_df[\"home_team\"].append(self._translate(next_row[\"name\"]))\n new_df[\"away_team\"].append(self._translate(row[\"name\"]))\n new_df[\"home_1stInn\"].append(next_row[\"1stInn\"])\n new_df[\"away_1stInn\"].append(row[\"1stInn\"])\n new_df[\"home_2ndInn\"].append(next_row[\"2ndInn\"])\n new_df[\"away_2ndInn\"].append(row[\"2ndInn\"])\n new_df[\"home_3rdInn\"].append(next_row[\"3rdInn\"])\n new_df[\"away_3rdInn\"].append(row[\"3rdInn\"])\n new_df[\"home_4thInn\"].append(next_row[\"4thInn\"])\n new_df[\"away_4thInn\"].append(row[\"4thInn\"])\n new_df[\"home_5thInn\"].append(next_row[\"5thInn\"])\n new_df[\"away_5thInn\"].append(row[\"5thInn\"])\n new_df[\"home_6thInn\"].append(next_row[\"6thInn\"])\n new_df[\"away_6thInn\"].append(row[\"6thInn\"])\n new_df[\"home_7thInn\"].append(next_row[\"7thInn\"])\n new_df[\"away_7thInn\"].append(row[\"7thInn\"])\n new_df[\"home_8thInn\"].append(next_row[\"8thInn\"])\n new_df[\"away_8thInn\"].append(row[\"8thInn\"])\n new_df[\"home_9thInn\"].append(next_row[\"9thInn\"])\n new_df[\"away_9thInn\"].append(row[\"9thInn\"])\n new_df[\"home_final\"].append(next_row[\"final\"])\n new_df[\"away_final\"].append(row[\"final\"])\n new_df[\"home_open_ml\"].append(next_row[\"open_ml\"])\n new_df[\"away_open_ml\"].append(row[\"open_ml\"])\n new_df[\"home_close_ml\"].append(next_row[\"close_ml\"])\n new_df[\"away_close_ml\"].append(row[\"close_ml\"])\n new_df[\"home_close_spread\"].append(next_row[\"close_spread\"])\n new_df[\"away_close_spread\"].append(row[\"close_spread\"])\n new_df[\"home_close_spread_odds\"].append(next_row[\"close_spread_odds\"])\n 
new_df[\"away_close_spread_odds\"].append(row[\"close_spread_odds\"])\n new_df[\"open_over_under\"].append(next_row[\"open_over_under\"])\n new_df[\"open_over_under_odds\"].append(next_row[\"open_over_under_odds\"])\n new_df[\"close_over_under\"].append(next_row[\"close_over_under\"])\n new_df[\"close_over_under_odds\"].append(next_row[\"close_over_under_odds\"])\n\n return pd.DataFrame(new_df)\n\n def driver(self):\n dfs = pd.DataFrame()\n for season in self.seasons:\n url = self.base + str(season) + self.ext\n headers = {\"User-Agent\": \"Mozilla/5.0\"}\n r = requests.get(url, headers=headers)\n\n with io.BytesIO(r.content) as fh:\n df = pd.read_excel(fh, header=None, sheet_name=None)\n dfs = pd.concat(\n [dfs, self._reformat_data(df[\"Sheet1\"][1:], season)], axis=0\n )\n\n return self._to_schema(dfs)" } ]
import argparse
import config
from scrapers.sportsbookreview import (
    NFLOddsScraper,
    NBAOddsScraper,
    NHLOddsScraper,
    MLBOddsScraper,
)
5,377
parser = argparse.ArgumentParser() parser.add_argument("--sport", type=str, required=True) # start and end years parser.add_argument("--start", type=int, required=True) parser.add_argument("--end", type=int, required=True) # filename for output parser.add_argument("--filename", type=str, required=True) # output format (csv or json), default is json parser.add_argument("--format", type=str, default="json") args = parser.parse_args() if __name__ == "__main__": if args.start < config.MIN_YEAR or args.end > config.MAX_YEAR: raise ValueError( f"Invalid year range. Must be between {config.MIN_YEAR} and {config.MAX_YEAR}." ) if args.start > args.end: raise ValueError("Invalid year range. Start year must be before end year.") list_yrs = list(range(args.start, args.end + 1)) scrapers = { "nfl": NFLOddsScraper,
parser = argparse.ArgumentParser() parser.add_argument("--sport", type=str, required=True) # start and end years parser.add_argument("--start", type=int, required=True) parser.add_argument("--end", type=int, required=True) # filename for output parser.add_argument("--filename", type=str, required=True) # output format (csv or json), default is json parser.add_argument("--format", type=str, default="json") args = parser.parse_args() if __name__ == "__main__": if args.start < config.MIN_YEAR or args.end > config.MAX_YEAR: raise ValueError( f"Invalid year range. Must be between {config.MIN_YEAR} and {config.MAX_YEAR}." ) if args.start > args.end: raise ValueError("Invalid year range. Start year must be before end year.") list_yrs = list(range(args.start, args.end + 1)) scrapers = { "nfl": NFLOddsScraper,
"nba": NBAOddsScraper,
1
2023-12-10 07:36:05+00:00
8k
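The `_to_schema` methods in the scraper snippets above walk the raw sheet two rows at a time via `self._pairwise(progress)`, pairing each visitor row with the home row that follows it. That helper is not included in the excerpt, so the sketch below is only a plausible stand-in, with invented demo data, to show how such pairing typically works.

# Hedged sketch: a pairwise helper consistent with how _to_schema consumes it.
# The real OddsScraper._pairwise is not shown above, so this is an assumption.
import pandas as pd

def pairwise_rows(df: pd.DataFrame):
    """Yield ((idx, away_row), (idx, home_row)) for consecutive row pairs."""
    it = df.iterrows()
    return zip(it, it)  # zipping an iterator with itself groups items 0-1, 2-3, ...

demo = pd.DataFrame({"name": ["Buffalo", "NYJets", "Dallas", "NYGiants"],
                     "final": [16, 22, 40, 0]})  # away row first, then home row
for (_, away), (_, home) in pairwise_rows(demo):
    print(f"{away['name']} @ {home['name']}: {away['final']}-{home['final']}")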
chenchenygu/watermark-learnability
compute_metrics.py
[ { "identifier": "WatermarkDetector", "path": "kgw_watermarking/watermark_reliability_release/watermark_processor.py", "snippet": "class WatermarkDetector(WatermarkBase):\n \"\"\"This is the detector for all watermarks imprinted with WatermarkLogitsProcessor.\n\n The detector needs to be given the exact same settings that were given during text generation to replicate the watermark\n greenlist generation and so detect the watermark.\n This includes the correct device that was used during text generation, the correct tokenizer, the correct\n seeding_scheme name, and parameters (delta, gamma).\n\n Optional arguments are\n * normalizers [\"unicode\", \"homoglyphs\", \"truecase\"] -> These can mitigate modifications to generated text that could trip the watermark\n * ignore_repeated_ngrams -> This option changes the detection rules to count every unique ngram only once.\n * z_threshold -> Changing this threshold will change the sensitivity of the detector.\n \"\"\"\n\n def __init__(\n self,\n *args,\n device: torch.device = None,\n tokenizer: Tokenizer = None,\n z_threshold: float = 4.0,\n normalizers: list[str] = [\"unicode\"], # or also: [\"unicode\", \"homoglyphs\", \"truecase\"]\n ignore_repeated_ngrams: bool = False,\n **kwargs,\n ):\n super().__init__(*args, **kwargs)\n # also configure the metrics returned/preprocessing options\n assert device, \"Must pass device\"\n assert tokenizer, \"Need an instance of the generating tokenizer to perform detection\"\n\n self.tokenizer = tokenizer\n self.device = device\n self.z_threshold = z_threshold\n self.rng = torch.Generator(device=self.device)\n\n self.normalizers = []\n for normalization_strategy in normalizers:\n self.normalizers.append(normalization_strategy_lookup(normalization_strategy))\n self.ignore_repeated_ngrams = ignore_repeated_ngrams\n\n def dummy_detect(\n self,\n return_prediction: bool = True,\n return_scores: bool = True,\n z_threshold: float = None,\n return_num_tokens_scored: bool = True,\n return_num_green_tokens: bool = True,\n return_green_fraction: bool = True,\n return_green_token_mask: bool = False,\n return_all_window_scores: bool = False,\n return_z_score: bool = True,\n return_z_at_T: bool = True,\n return_p_value: bool = True,\n ):\n # HF-style output dictionary\n score_dict = dict()\n if return_num_tokens_scored:\n score_dict.update(dict(num_tokens_scored=float(\"nan\")))\n if return_num_green_tokens:\n score_dict.update(dict(num_green_tokens=float(\"nan\")))\n if return_green_fraction:\n score_dict.update(dict(green_fraction=float(\"nan\")))\n if return_z_score:\n score_dict.update(dict(z_score=float(\"nan\")))\n if return_p_value:\n z_score = score_dict.get(\"z_score\")\n if z_score is None:\n z_score = float(\"nan\")\n score_dict.update(dict(p_value=float(\"nan\")))\n if return_green_token_mask:\n score_dict.update(dict(green_token_mask=[]))\n if return_all_window_scores:\n score_dict.update(dict(window_list=[]))\n if return_z_at_T:\n score_dict.update(dict(z_score_at_T=torch.tensor([])))\n\n output_dict = {}\n if return_scores:\n output_dict.update(score_dict)\n # if passed return_prediction then perform the hypothesis test and return the outcome\n if return_prediction:\n z_threshold = z_threshold if z_threshold else self.z_threshold\n assert (\n z_threshold is not None\n ), \"Need a threshold in order to decide outcome of detection test\"\n output_dict[\"prediction\"] = False\n\n return output_dict\n\n def _compute_z_score(self, observed_count, T):\n # count refers to number of green tokens, T is total 
number of tokens\n expected_count = self.gamma\n numer = observed_count - expected_count * T\n denom = sqrt(T * expected_count * (1 - expected_count))\n z = numer / denom\n return z\n\n def _compute_p_value(self, observed_count, T):\n p_value = scipy.stats.binom.sf(observed_count, T, self.gamma)\n return p_value\n\n @lru_cache(maxsize=2**32)\n def _get_ngram_score_cached(self, prefix: tuple[int], target: int):\n \"\"\"Expensive re-seeding and sampling is cached.\"\"\"\n # Handle with care, should ideally reset on __getattribute__ access to self.prf_type, self.context_width, self.self_salt, self.hash_key\n greenlist_ids = self._get_greenlist_ids(torch.as_tensor(prefix, device=self.device))\n return True if target in greenlist_ids else False\n\n def _score_ngrams_in_passage(self, input_ids: torch.Tensor):\n \"\"\"Core function to gather all ngrams in the input and compute their watermark.\"\"\"\n if len(input_ids) - self.context_width < 1:\n raise ValueError(\n f\"Must have at least {1} token to score after \"\n f\"the first min_prefix_len={self.context_width} tokens required by the seeding scheme.\"\n )\n\n # Compute scores for all ngrams contexts in the passage:\n token_ngram_generator = ngrams(\n input_ids.cpu().tolist(), self.context_width + 1 - self.self_salt\n )\n frequencies_table = collections.Counter(token_ngram_generator)\n ngram_to_watermark_lookup = {}\n for idx, ngram_example in enumerate(frequencies_table.keys()):\n prefix = ngram_example if self.self_salt else ngram_example[:-1]\n target = ngram_example[-1]\n ngram_to_watermark_lookup[ngram_example] = self._get_ngram_score_cached(prefix, target)\n\n return ngram_to_watermark_lookup, frequencies_table\n\n def _get_green_at_T_booleans(self, input_ids, ngram_to_watermark_lookup) -> tuple[torch.Tensor]:\n \"\"\"Generate binary list of green vs. 
red per token, a separate list that ignores repeated ngrams, and a list of offsets to\n convert between both representations:\n green_token_mask = green_token_mask_unique[offsets] except for all locations where otherwise a repeat would be counted\n \"\"\"\n green_token_mask, green_token_mask_unique, offsets = [], [], []\n used_ngrams = {}\n unique_ngram_idx = 0\n ngram_examples = ngrams(input_ids.cpu().tolist(), self.context_width + 1 - self.self_salt)\n\n for idx, ngram_example in enumerate(ngram_examples):\n green_token_mask.append(ngram_to_watermark_lookup[ngram_example])\n if self.ignore_repeated_ngrams:\n if ngram_example in used_ngrams:\n pass\n else:\n used_ngrams[ngram_example] = True\n unique_ngram_idx += 1\n green_token_mask_unique.append(ngram_to_watermark_lookup[ngram_example])\n else:\n green_token_mask_unique.append(ngram_to_watermark_lookup[ngram_example])\n unique_ngram_idx += 1\n offsets.append(unique_ngram_idx - 1)\n return (\n torch.tensor(green_token_mask),\n torch.tensor(green_token_mask_unique),\n torch.tensor(offsets),\n )\n\n def _score_sequence(\n self,\n input_ids: torch.Tensor,\n return_num_tokens_scored: bool = True,\n return_num_green_tokens: bool = True,\n return_green_fraction: bool = True,\n return_green_token_mask: bool = False,\n return_z_score: bool = True,\n return_z_at_T: bool = True,\n return_p_value: bool = True,\n ):\n ngram_to_watermark_lookup, frequencies_table = self._score_ngrams_in_passage(input_ids)\n green_token_mask, green_unique, offsets = self._get_green_at_T_booleans(\n input_ids, ngram_to_watermark_lookup\n )\n\n # Count up scores over all ngrams\n if self.ignore_repeated_ngrams:\n # Method that only counts a green/red hit once per unique ngram.\n # New num total tokens scored (T) becomes the number unique ngrams.\n # We iterate over all unqiue token ngrams in the input, computing the greenlist\n # induced by the context in each, and then checking whether the last\n # token falls in that greenlist.\n num_tokens_scored = len(frequencies_table.keys())\n green_token_count = sum(ngram_to_watermark_lookup.values())\n else:\n num_tokens_scored = sum(frequencies_table.values())\n assert num_tokens_scored == len(input_ids) - self.context_width + self.self_salt\n green_token_count = sum(\n freq * outcome\n for freq, outcome in zip(\n frequencies_table.values(), ngram_to_watermark_lookup.values()\n )\n )\n assert green_token_count == green_unique.sum()\n\n # HF-style output dictionary\n score_dict = dict()\n if return_num_tokens_scored:\n score_dict.update(dict(num_tokens_scored=num_tokens_scored))\n if return_num_green_tokens:\n score_dict.update(dict(num_green_tokens=green_token_count))\n if return_green_fraction:\n score_dict.update(dict(green_fraction=(green_token_count / num_tokens_scored)))\n if return_z_score:\n score_dict.update(\n dict(z_score=self._compute_z_score(green_token_count, num_tokens_scored))\n )\n if return_p_value:\n z_score = score_dict.get(\"z_score\")\n if z_score is None:\n z_score = self._compute_z_score(green_token_count, num_tokens_scored)\n score_dict.update(dict(p_value=self._compute_p_value(green_token_count, num_tokens_scored)))\n if return_green_token_mask:\n score_dict.update(dict(green_token_mask=green_token_mask.tolist()))\n if return_z_at_T:\n # Score z_at_T separately:\n sizes = torch.arange(1, len(green_unique) + 1)\n seq_z_score_enum = torch.cumsum(green_unique, dim=0) - self.gamma * sizes\n seq_z_score_denom = torch.sqrt(sizes * self.gamma * (1 - self.gamma))\n z_score_at_effective_T = seq_z_score_enum / 
seq_z_score_denom\n z_score_at_T = z_score_at_effective_T[offsets]\n assert torch.isclose(z_score_at_T[-1], torch.tensor(z_score))\n\n score_dict.update(dict(z_score_at_T=z_score_at_T))\n\n return score_dict\n\n def _score_windows_impl_batched(\n self,\n input_ids: torch.Tensor,\n window_size: str,\n window_stride: int = 1,\n ):\n # Implementation details:\n # 1) --ignore_repeated_ngrams is applied globally, and windowing is then applied over the reduced binary vector\n # this is only one way of doing it, another would be to ignore bigrams within each window (maybe harder to parallelize that)\n # 2) These windows on the binary vector of green/red hits, independent of context_width, in contrast to Kezhi's first implementation\n # 3) z-scores from this implementation cannot be directly converted to p-values, and should only be used as labels for a\n # ROC chart that calibrates to a chosen FPR. Due, to windowing, the multiple hypotheses will increase scores across the board#\n # naive_count_correction=True is a partial remedy to this\n\n ngram_to_watermark_lookup, frequencies_table = self._score_ngrams_in_passage(input_ids)\n green_mask, green_ids, offsets = self._get_green_at_T_booleans(\n input_ids, ngram_to_watermark_lookup\n )\n len_full_context = len(green_ids)\n\n partial_sum_id_table = torch.cumsum(green_ids, dim=0)\n\n if window_size == \"max\":\n # could start later, small window sizes cannot generate enough power\n # more principled: solve (T * Spike_Entropy - g * T) / sqrt(T * g * (1 - g)) = z_thresh for T\n sizes = range(1, len_full_context)\n else:\n sizes = [int(x) for x in window_size.split(\",\") if len(x) > 0]\n\n z_score_max_per_window = torch.zeros(len(sizes))\n cumulative_eff_z_score = torch.zeros(len_full_context)\n s = window_stride\n\n window_fits = False\n for idx, size in enumerate(sizes):\n if size <= len_full_context:\n # Compute hits within window for all positions in parallel:\n window_score = torch.zeros(len_full_context - size + 1, dtype=torch.long)\n # Include 0-th window\n window_score[0] = partial_sum_id_table[size - 1]\n # All other windows from the 1st:\n window_score[1:] = partial_sum_id_table[size::s] - partial_sum_id_table[:-size:s]\n\n # Now compute batched z_scores\n batched_z_score_enum = window_score - self.gamma * size\n z_score_denom = sqrt(size * self.gamma * (1 - self.gamma))\n batched_z_score = batched_z_score_enum / z_score_denom\n\n # And find the maximal hit\n maximal_z_score = batched_z_score.max()\n z_score_max_per_window[idx] = maximal_z_score\n\n z_score_at_effective_T = torch.cummax(batched_z_score, dim=0)[0]\n cumulative_eff_z_score[size::s] = torch.maximum(\n cumulative_eff_z_score[size::s], z_score_at_effective_T[:-1]\n )\n window_fits = True # successful computation for any window in sizes\n\n if not window_fits:\n raise ValueError(\n f\"Could not find a fitting window with window sizes {window_size} for (effective) context length {len_full_context}.\"\n )\n\n # Compute optimal window size and z-score\n cumulative_z_score = cumulative_eff_z_score[offsets]\n optimal_z, optimal_window_size_idx = z_score_max_per_window.max(dim=0)\n optimal_window_size = sizes[optimal_window_size_idx]\n return (\n optimal_z,\n optimal_window_size,\n z_score_max_per_window,\n cumulative_z_score,\n green_mask,\n )\n\n def _score_sequence_window(\n self,\n input_ids: torch.Tensor,\n return_num_tokens_scored: bool = True,\n return_num_green_tokens: bool = True,\n return_green_fraction: bool = True,\n return_green_token_mask: bool = False,\n return_z_score: 
bool = True,\n return_z_at_T: bool = True,\n return_p_value: bool = True,\n window_size: str = None,\n window_stride: int = 1,\n ):\n (\n optimal_z,\n optimal_window_size,\n _,\n z_score_at_T,\n green_mask,\n ) = self._score_windows_impl_batched(input_ids, window_size, window_stride)\n\n # HF-style output dictionary\n score_dict = dict()\n if return_num_tokens_scored:\n score_dict.update(dict(num_tokens_scored=optimal_window_size))\n\n denom = sqrt(optimal_window_size * self.gamma * (1 - self.gamma))\n green_token_count = int(optimal_z * denom + self.gamma * optimal_window_size)\n green_fraction = green_token_count / optimal_window_size\n if return_num_green_tokens:\n score_dict.update(dict(num_green_tokens=green_token_count))\n if return_green_fraction:\n score_dict.update(dict(green_fraction=green_fraction))\n if return_z_score:\n score_dict.update(dict(z_score=optimal_z))\n if return_z_at_T:\n score_dict.update(dict(z_score_at_T=z_score_at_T))\n if return_p_value:\n z_score = score_dict.get(\"z_score\", optimal_z)\n score_dict.update(dict(p_value=self._compute_p_value(green_token_count, optimal_window_size)))\n\n # Return per-token results for mask. This is still the same, just scored by windows\n # todo would be to mark the actually counted tokens differently\n if return_green_token_mask:\n score_dict.update(dict(green_token_mask=green_mask.tolist()))\n\n return score_dict\n\n def detect(\n self,\n text: str = None,\n tokenized_text: list[int] = None,\n window_size: str = None,\n window_stride: int = None,\n return_prediction: bool = True,\n return_scores: bool = True,\n z_threshold: float = None,\n convert_to_float: bool = False,\n **kwargs,\n ) -> dict:\n \"\"\"Scores a given string of text and returns a dictionary of results.\"\"\"\n\n assert (text is not None) ^ (\n tokenized_text is not None\n ), \"Must pass either the raw or tokenized string\"\n if return_prediction:\n kwargs[\n \"return_p_value\"\n ] = True # to return the \"confidence\":=1-p of positive detections\n\n # run optional normalizers on text\n for normalizer in self.normalizers:\n text = normalizer(text)\n if len(self.normalizers) > 0:\n print(f\"Text after normalization:\\n\\n{text}\\n\")\n\n if tokenized_text is None:\n assert self.tokenizer is not None, (\n \"Watermark detection on raw string \",\n \"requires an instance of the tokenizer \",\n \"that was used at generation time.\",\n )\n tokenized_text = self.tokenizer(text, return_tensors=\"pt\", add_special_tokens=False)[\n \"input_ids\"\n ][0].to(self.device)\n if tokenized_text[0] == self.tokenizer.bos_token_id:\n tokenized_text = tokenized_text[1:]\n else:\n # try to remove the bos_tok at beginning if it's there\n if (self.tokenizer is not None) and (tokenized_text[0] == self.tokenizer.bos_token_id):\n tokenized_text = tokenized_text[1:]\n\n # call score method\n output_dict = {}\n\n if window_size is not None:\n # assert window_size <= len(tokenized_text) cannot assert for all new types\n score_dict = self._score_sequence_window(\n tokenized_text,\n window_size=window_size,\n window_stride=window_stride,\n **kwargs,\n )\n output_dict.update(score_dict)\n else:\n score_dict = self._score_sequence(tokenized_text, **kwargs)\n if return_scores:\n output_dict.update(score_dict)\n # if passed return_prediction then perform the hypothesis test and return the outcome\n if return_prediction:\n z_threshold = z_threshold if z_threshold else self.z_threshold\n assert (\n z_threshold is not None\n ), \"Need a threshold in order to decide outcome of detection test\"\n 
output_dict[\"prediction\"] = score_dict[\"z_score\"] > z_threshold\n if output_dict[\"prediction\"]:\n output_dict[\"confidence\"] = 1 - score_dict[\"p_value\"]\n\n # convert any numerical values to float if requested\n if convert_to_float:\n for key, value in output_dict.items():\n if isinstance(value, int):\n output_dict[key] = float(value)\n\n return output_dict" }, { "identifier": "AarWatermarkDetector", "path": "aar_watermark.py", "snippet": "class AarWatermarkDetector:\n def __init__(\n self,\n tokenizer: AutoTokenizer,\n k: int = 1,\n seed: int = DEFAULT_SEED,\n eps: float = 1e-20,\n ):\n generator = torch.Generator() # generator is always cpu for reproducibility\n generator.manual_seed(seed)\n vocab_size = len(tokenizer)\n self.uniform = torch.clamp(\n torch.rand((vocab_size * k, vocab_size), generator=generator, dtype=torch.float32),\n min=eps,\n max=1 - eps,\n )\n\n self.tokenizer = tokenizer\n self.k = k\n self.seed = seed\n self.eps = eps\n self.vocab_size = vocab_size\n \n def detect(self, text: str) -> float:\n \"\"\"\n Returns p-value, where null hypothesis is that the text is not watermarked.\n \n Under null hypothesis, each u is Uniform(0, 1), so each score (-log(1 -u )) is Exp(1).\n So the sum of scores is distributed as Gamma(n_tokens, 1).\n \"\"\"\n tokens = self.tokenizer.encode(text, return_tensors=\"pt\", add_special_tokens=False)[0] # (seq_len,)\n seq_len = tokens.shape[0]\n score = 0\n # TODO tensorize\n for i in range(self.k, seq_len):\n prev_tokens_sum = torch.sum(tokens[i - self.k:i], dim=-1)\n token = tokens[i]\n u = self.uniform[prev_tokens_sum, token]\n score += -torch.log(1 - u)\n p_value = scipy.stats.gamma.sf(score, seq_len - self.k, loc=0, scale=1)\n return p_value" } ]
import argparse
import os
import json
import numpy as np
import mauve
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoTokenizer, AutoModelForCausalLM
from tqdm import tqdm
from kgw_watermarking.watermark_reliability_release.watermark_processor import WatermarkDetector
from aar_watermark import AarWatermarkDetector
5,824
DEFAULT_SEED = 42 device = "cuda" if torch.cuda.is_available() else "cpu" parser = argparse.ArgumentParser() parser.add_argument("--tokenizer_name", type=str, required=True) parser.add_argument("--watermark_tokenizer_name", type=str, default=None) parser.add_argument("--truncate", action="store_true", default=False) parser.add_argument("--num_tokens", type=int, default=200) parser.add_argument("--lm_score_model_name", type=str, required=True) parser.add_argument("--input_file", type=str, required=True) parser.add_argument("--output_file", type=str, required=True) parser.add_argument("--text_field", type=str, default="full_model_text") parser.add_argument("--batch_size", type=int, default=32) parser.add_argument("--overwrite_output_file", action="store_true", default=False) parser.add_argument("--fp16", action="store_true", default=False) parser.add_argument("--kgw_device", type=str, default="cpu", choices=["cpu", "cuda"]) parser.add_argument("--mauve_max_length", type=int, default=200) args = parser.parse_args() device = "cuda" if torch.cuda.is_available() else "cpu" if os.path.exists(args.output_file) and not args.overwrite_output_file: raise ValueError(f"Output file {args.output_file} already exists and overwrite_output_file is False") with open(args.input_file, "r") as f: data = json.load(f) samples_dict = data["samples"] prompt_length = data["prompt_length"] if args.watermark_tokenizer_name is None: args.watermark_tokenizer_name = args.tokenizer_name tokenizer = AutoTokenizer.from_pretrained(args.watermark_tokenizer_name) if tokenizer.pad_token is None: tokenizer.pad_token = tokenizer.eos_token compute_metrics_args_dict = {} compute_metrics_args_dict.update(vars(args)) data["compute_metrics_args_dict"] = compute_metrics_args_dict def save_data(): os.makedirs(os.path.dirname(args.output_file), exist_ok=True) with open(args.output_file, "w") as f: print(f"Writing output to {args.output_file}") json.dump(data, f, indent=4) # compute watermark p-values for model_name, sd in tqdm(samples_dict.items()): if 'watermark_config' in samples_dict[model_name]: watermark_config = samples_dict[model_name]['watermark_config'] if isinstance(watermark_config, list): watermark_config = watermark_config[0] else: #print(f"Skipping {model_name}, no watermark config") #continue print(f"{model_name}, no watermark config, parsing string") watermark_config = {} if 'aar' in model_name or "k" in watermark_config: if not watermark_config: aar_s = "aar-k" k = int(model_name[model_name.find(aar_s) + len(aar_s)]) seed = DEFAULT_SEED print(f"{k=}, {seed=}") detector = AarWatermarkDetector( k=k, seed=seed, tokenizer=tokenizer, ) else: detector = AarWatermarkDetector( k=watermark_config["k"], seed=watermark_config.get("seed", DEFAULT_SEED), tokenizer=tokenizer, ) elif 'kth' in model_name: # KTH detection in kth_watermarking/compute_kth_scores.py, takes long time print(f"Skipping {model_name}, KTH watermark") continue elif 'kgw' in model_name or "gamma" in watermark_config: print(f"gamma = {watermark_config.get('gamma', 0.25)}")
DEFAULT_SEED = 42 device = "cuda" if torch.cuda.is_available() else "cpu" parser = argparse.ArgumentParser() parser.add_argument("--tokenizer_name", type=str, required=True) parser.add_argument("--watermark_tokenizer_name", type=str, default=None) parser.add_argument("--truncate", action="store_true", default=False) parser.add_argument("--num_tokens", type=int, default=200) parser.add_argument("--lm_score_model_name", type=str, required=True) parser.add_argument("--input_file", type=str, required=True) parser.add_argument("--output_file", type=str, required=True) parser.add_argument("--text_field", type=str, default="full_model_text") parser.add_argument("--batch_size", type=int, default=32) parser.add_argument("--overwrite_output_file", action="store_true", default=False) parser.add_argument("--fp16", action="store_true", default=False) parser.add_argument("--kgw_device", type=str, default="cpu", choices=["cpu", "cuda"]) parser.add_argument("--mauve_max_length", type=int, default=200) args = parser.parse_args() device = "cuda" if torch.cuda.is_available() else "cpu" if os.path.exists(args.output_file) and not args.overwrite_output_file: raise ValueError(f"Output file {args.output_file} already exists and overwrite_output_file is False") with open(args.input_file, "r") as f: data = json.load(f) samples_dict = data["samples"] prompt_length = data["prompt_length"] if args.watermark_tokenizer_name is None: args.watermark_tokenizer_name = args.tokenizer_name tokenizer = AutoTokenizer.from_pretrained(args.watermark_tokenizer_name) if tokenizer.pad_token is None: tokenizer.pad_token = tokenizer.eos_token compute_metrics_args_dict = {} compute_metrics_args_dict.update(vars(args)) data["compute_metrics_args_dict"] = compute_metrics_args_dict def save_data(): os.makedirs(os.path.dirname(args.output_file), exist_ok=True) with open(args.output_file, "w") as f: print(f"Writing output to {args.output_file}") json.dump(data, f, indent=4) # compute watermark p-values for model_name, sd in tqdm(samples_dict.items()): if 'watermark_config' in samples_dict[model_name]: watermark_config = samples_dict[model_name]['watermark_config'] if isinstance(watermark_config, list): watermark_config = watermark_config[0] else: #print(f"Skipping {model_name}, no watermark config") #continue print(f"{model_name}, no watermark config, parsing string") watermark_config = {} if 'aar' in model_name or "k" in watermark_config: if not watermark_config: aar_s = "aar-k" k = int(model_name[model_name.find(aar_s) + len(aar_s)]) seed = DEFAULT_SEED print(f"{k=}, {seed=}") detector = AarWatermarkDetector( k=k, seed=seed, tokenizer=tokenizer, ) else: detector = AarWatermarkDetector( k=watermark_config["k"], seed=watermark_config.get("seed", DEFAULT_SEED), tokenizer=tokenizer, ) elif 'kth' in model_name: # KTH detection in kth_watermarking/compute_kth_scores.py, takes long time print(f"Skipping {model_name}, KTH watermark") continue elif 'kgw' in model_name or "gamma" in watermark_config: print(f"gamma = {watermark_config.get('gamma', 0.25)}")
detector = WatermarkDetector(
0
2023-12-07 16:45:33+00:00
8k
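The detector snippets in this row boil down to two closed-form statistics: the KGW green-list z-score (c - gamma*T) / sqrt(T*gamma*(1-gamma)) from _compute_z_score, and the Aar p-value, where the sum of -log(1-u) terms is compared against a Gamma(n, 1) null as described in the AarWatermarkDetector docstring. A minimal standalone restatement of both formulas follows; the counts are made up for illustration.

# Standalone sketch of the detection statistics used above; example numbers are invented.
from math import sqrt
import scipy.stats

def kgw_z_score(green_count: int, total_scored: int, gamma: float = 0.25) -> float:
    # One-proportion z-test against the expected green fraction gamma.
    return (green_count - gamma * total_scored) / sqrt(total_scored * gamma * (1 - gamma))

def aar_p_value(score_sum: float, num_scored: int) -> float:
    # Under H0 each -log(1 - u) is Exp(1), so their sum is Gamma(num_scored, 1).
    return scipy.stats.gamma.sf(score_sum, num_scored, loc=0, scale=1)

print(kgw_z_score(green_count=80, total_scored=200))  # ~4.9, above the default z_threshold of 4.0
print(aar_p_value(score_sum=260.0, num_scored=199))   # small p-value, consistent with watermarked text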
skyoux/SemAIM
main_finetune.py
[ { "identifier": "ImageListFolder", "path": "datasets/datasets.py", "snippet": "class ImageListFolder(datasets.ImageFolder):\n def __init__(self, root, transform=None, target_transform=None,\n ann_file=None, loader=default_loader):\n self.root = root\n self.transform = transform\n self.loader = loader\n self.target_transform = target_transform\n self.nb_classes = 1000\n\n assert ann_file is not None\n print('load info from', ann_file)\n\n self.samples = []\n ann = open(ann_file)\n for elem in ann.readlines():\n cut = elem.split(' ')\n path_current = os.path.join(root, cut[0])\n target_current = int(cut[1])\n self.samples.append((path_current, target_current))\n ann.close()\n\n print('load finish')" }, { "identifier": "build_transform", "path": "datasets/datasets.py", "snippet": "def build_transform(is_train, args):\n mean = IMAGENET_DEFAULT_MEAN\n std = IMAGENET_DEFAULT_STD\n # train transform\n if is_train:\n # this should always dispatch to transforms_imagenet_train\n transform = create_transform(\n input_size=args.input_size,\n is_training=True,\n color_jitter=args.color_jitter,\n auto_augment=args.aa,\n interpolation='bicubic',\n re_prob=args.reprob,\n re_mode=args.remode,\n re_count=args.recount,\n mean=mean,\n std=std,\n )\n return transform\n\n # eval transform\n t = []\n if args.input_size <= 224:\n crop_pct = 224 / 256\n else:\n crop_pct = 1.0\n size = int(args.input_size / crop_pct)\n t.append(\n transforms.Resize(size, interpolation=torchvision.transforms.InterpolationMode.BICUBIC), # to maintain same ratio w.r.t. 224 images\n )\n t.append(transforms.CenterCrop(args.input_size))\n\n t.append(transforms.ToTensor())\n t.append(transforms.Normalize(mean, std))\n return transforms.Compose(t)" }, { "identifier": "interpolate_pos_embed", "path": "util/pos_embed.py", "snippet": "def interpolate_pos_embed(model, checkpoint_model):\n if 'pos_embed' in checkpoint_model:\n pos_embed_checkpoint = checkpoint_model['pos_embed']\n embedding_size = pos_embed_checkpoint.shape[-1]\n num_patches = model.patch_embed.num_patches\n num_extra_tokens = model.pos_embed.shape[-2] - num_patches\n # height (== width) for the checkpoint position embedding\n orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)\n # height (== width) for the new position embedding\n new_size = int(num_patches ** 0.5)\n # class_token and dist_token are kept unchanged\n if orig_size != new_size:\n print(\"Position interpolate from %dx%d to %dx%d\" % (orig_size, orig_size, new_size, new_size))\n extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]\n # only the position tokens are interpolated\n pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]\n pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)\n pos_tokens = torch.nn.functional.interpolate(\n pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)\n pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)\n new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)\n checkpoint_model['pos_embed'] = new_pos_embed" }, { "identifier": "NativeScalerWithGradNormCount", "path": "util/misc.py", "snippet": "class NativeScalerWithGradNormCount:\n state_dict_key = \"amp_scaler\"\n\n def __init__(self):\n self._scaler = torch.cuda.amp.GradScaler()\n\n def __call__(self, loss, optimizer, clip_grad=None, parameters=None, create_graph=False, update_grad=True):\n self._scaler.scale(loss).backward(create_graph=create_graph)\n if update_grad:\n if clip_grad is not None:\n assert parameters is 
not None\n self._scaler.unscale_(optimizer) # unscale the gradients of optimizer's assigned params in-place\n norm = torch.nn.utils.clip_grad_norm_(parameters, clip_grad)\n else:\n self._scaler.unscale_(optimizer)\n norm = get_grad_norm_(parameters)\n self._scaler.step(optimizer)\n self._scaler.update()\n else:\n norm = None\n return norm\n\n def state_dict(self):\n return self._scaler.state_dict()\n\n def load_state_dict(self, state_dict):\n self._scaler.load_state_dict(state_dict)" }, { "identifier": "models_vit", "path": "models/models_vit.py", "snippet": "class VisionTransformer(timm.models.vision_transformer.VisionTransformer):\n def __init__(self, global_pool=False, **kwargs):\n def forward_features(self, x):\n def forward_head(self, x):\ndef vit_small_patch16(**kwargs):\ndef vit_base_patch16(**kwargs):\ndef vit_large_patch16(**kwargs):\ndef vit_huge_patch14(**kwargs):\n B = x.shape[0]" }, { "identifier": "train_one_epoch", "path": "engines/engine_finetune.py", "snippet": "def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module,\n data_loader: Iterable, optimizer: torch.optim.Optimizer,\n device: torch.device, epoch: int, loss_scaler, max_norm: float = 0,\n mixup_fn: Optional[Mixup] = None, log_writer=None,\n args=None):\n model.train(True)\n metric_logger = misc.MetricLogger(delimiter=\" \")\n metric_logger.add_meter('lr', misc.SmoothedValue(window_size=1, fmt='{value:.6f}'))\n header = 'Epoch: [{}/{}]'.format(epoch, args.epochs)\n print_freq = 20\n\n accum_iter = args.accum_iter\n\n optimizer.zero_grad()\n\n if log_writer is not None:\n print('log_dir: {}'.format(log_writer.log_dir))\n\n for data_iter_step, (samples, targets) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):\n\n # we use a per iteration (instead of per epoch) lr scheduler\n if data_iter_step % accum_iter == 0:\n lr_sched.adjust_learning_rate(optimizer, data_iter_step / len(data_loader) + epoch, args)\n\n samples = samples.to(device, non_blocking=True)\n targets = targets.to(device, non_blocking=True)\n\n if mixup_fn is not None:\n samples, targets = mixup_fn(samples, targets)\n\n with torch.cuda.amp.autocast():\n outputs = model(samples)\n loss = criterion(outputs, targets)\n\n loss_value = loss.item()\n\n if not math.isfinite(loss_value):\n print(\"Loss is {}, stopping training\".format(loss_value))\n sys.exit(1)\n\n loss /= accum_iter\n loss_scaler(loss, optimizer, clip_grad=max_norm,\n parameters=model.parameters(), create_graph=False,\n update_grad=(data_iter_step + 1) % accum_iter == 0)\n if (data_iter_step + 1) % accum_iter == 0:\n optimizer.zero_grad()\n\n torch.cuda.synchronize()\n\n metric_logger.update(loss=loss_value)\n min_lr = 10.\n max_lr = 0.\n for group in optimizer.param_groups:\n min_lr = min(min_lr, group[\"lr\"])\n max_lr = max(max_lr, group[\"lr\"])\n\n metric_logger.update(lr=max_lr)\n\n loss_value_reduce = misc.all_reduce_mean(loss_value)\n if log_writer is not None and (data_iter_step + 1) % accum_iter == 0:\n \"\"\" We use epoch_1000x as the x-axis in tensorboard.\n This calibrates different curves when batch size changes.\n \"\"\"\n epoch_1000x = int((data_iter_step / len(data_loader) + epoch) * 1000)\n log_writer.add_scalar('loss', loss_value_reduce, epoch_1000x)\n log_writer.add_scalar('lr', max_lr, epoch_1000x)\n\n # gather the stats from all processes\n metric_logger.synchronize_between_processes()\n print(\"Averaged stats:\", metric_logger)\n return {k: meter.global_avg for k, meter in metric_logger.meters.items()}" }, { "identifier": "evaluate", 
"path": "engines/engine_finetune.py", "snippet": "@torch.no_grad()\ndef evaluate(data_loader, model, device):\n criterion = torch.nn.CrossEntropyLoss()\n\n metric_logger = misc.MetricLogger(delimiter=\" \")\n header = 'Test:'\n\n # switch to evaluation mode\n model.eval()\n\n for batch in metric_logger.log_every(data_loader, 10, header):\n images = batch[0]\n target = batch[-1]\n images = images.to(device, non_blocking=True)\n target = target.to(device, non_blocking=True)\n\n # compute output\n with torch.cuda.amp.autocast():\n output = model(images)\n loss = criterion(output, target)\n\n acc1, acc5 = accuracy(output, target, topk=(1, 5))\n\n batch_size = images.shape[0]\n metric_logger.update(loss=loss.item())\n metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)\n metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)\n # gather the stats from all processes\n metric_logger.synchronize_between_processes()\n print('* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}'\n .format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss))\n\n return {k: meter.global_avg for k, meter in metric_logger.meters.items()}" } ]
import argparse
import datetime
import json
import numpy as np
import os
import time
import builtins
import torch
import torch.backends.cudnn as cudnn
import timm
import util.lr_decay as lrd
import util.misc as misc
from pathlib import Path
from torch.utils.tensorboard import SummaryWriter
from timm.models.layers import trunc_normal_
from timm.data.mixup import Mixup
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from datasets.datasets import ImageListFolder, build_transform
from util.pos_embed import interpolate_pos_embed
from util.misc import NativeScalerWithGradNormCount as NativeScaler
from models import models_vit
from engines.engine_finetune import train_one_epoch, evaluate
5,515
shuffle=False, ) mixup_fn = None mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None if mixup_active: print("Mixup is activated!") mixup_fn = Mixup( mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax, prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode, label_smoothing=args.smoothing, num_classes=args.nb_classes) model = models_vit.__dict__[args.model]( num_classes=args.nb_classes, drop_path_rate=args.drop_path, global_pool=args.global_pool, ) if args.finetune and not args.eval: # load pretrained model checkpoint = torch.load(args.finetune, map_location='cpu') print("Load pre-trained checkpoint from: %s" % args.finetune) if 'state_dict' in checkpoint: checkpoint_model = checkpoint['state_dict'] else: checkpoint_model = checkpoint['model'] state_dict = model.state_dict() checkpoint_model = {k.replace("module.", ""): v for k, v in checkpoint_model.items()} for k in ['head.weight', 'head.bias']: if k in checkpoint_model and checkpoint_model[k].shape != state_dict[k].shape: print(f"Removing key {k} from pretrained checkpoint") del checkpoint_model[k] # interpolate position embedding interpolate_pos_embed(model, checkpoint_model) # load pre-trained model msg = model.load_state_dict(checkpoint_model, strict=False) print(msg) print("global_pool = ", args.global_pool) if args.global_pool: assert set(msg.missing_keys) == {'head.weight', 'head.bias', 'fc_norm.weight', 'fc_norm.bias'} else: assert set(msg.missing_keys) == {'head.weight', 'head.bias'} # manually initialize fc layer trunc_normal_(model.head.weight, std=2e-5) model.to(device) model_without_ddp = model n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad) # print("Model = %s" % str(model_without_ddp)) print('number of params (M): %.2f' % (n_parameters / 1.e6)) eff_batch_size = args.batch_size * args.accum_iter * misc.get_world_size() if args.lr is None: # only base_lr is specified args.lr = args.blr * eff_batch_size / 256 print("base lr: %.2e" % (args.lr * 256 / eff_batch_size)) print("actual lr: %.2e" % args.lr) print("accumulate grad iterations: %d" % args.accum_iter) print("effective batch size: %d" % eff_batch_size) if args.distributed: model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu]) model_without_ddp = model.module # build optimizer with layer-wise lr decay (lrd) param_groups = lrd.param_groups_lrd(model_without_ddp, args.weight_decay, no_weight_decay_list=model_without_ddp.no_weight_decay(), layer_decay=args.layer_decay ) optimizer = torch.optim.AdamW(param_groups, lr=args.lr) loss_scaler = NativeScaler() if mixup_fn is not None: # smoothing is handled with mixup label transform criterion = SoftTargetCrossEntropy() elif args.smoothing > 0.: criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing) else: criterion = torch.nn.CrossEntropyLoss() print("criterion = %s" % str(criterion)) # resume model ckpt_path = os.path.join(args.output_dir, f"{args.model}.{args.experiment}.temp.pth") if not os.path.isfile(ckpt_path): print("Checkpoint not founded in {}, train from random initialization".format(ckpt_path)) else: print("Found checkpoint at {}".format(ckpt_path)) misc.load_model(args=args, ckpt_path=ckpt_path, model_without_ddp=model, optimizer=optimizer, loss_scaler=loss_scaler) if args.eval: test_stats = evaluate(data_loader_val, model, device) print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%") exit(0) if global_rank == 0 and 
args.log_dir is not None and not args.eval: log_dir = os.path.join(args.log_dir, f"{args.model}.{args.experiment}") os.makedirs(log_dir, exist_ok=True) log_writer = SummaryWriter(log_dir=log_dir) else: log_writer = None print(f"Start training for {args.epochs} epochs") start_time = time.time() max_accuracy = 0.0 for epoch in range(args.start_epoch, args.epochs): if args.distributed: data_loader_train.sampler.set_epoch(epoch)
# This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # -------------------------------------------------------- # References: # DeiT: https://github.com/facebookresearch/deit # BEiT: https://github.com/microsoft/unilm/tree/master/beit # MAE: https://github.com/facebookresearch/mae # -------------------------------------------------------- # assert timm.__version__ == "0.3.2" # version check def get_args_parser(): parser = argparse.ArgumentParser('UM-MAE fine-tuning for image classification', add_help=False) parser.add_argument('--batch_size', default=64, type=int, help='Batch size per GPU (effective batch size is batch_size * accum_iter * # gpus') parser.add_argument('--epochs', default=50, type=int) parser.add_argument('--accum_iter', default=1, type=int, help='Accumulate gradient iterations (for increasing the effective batch size under memory constraints)') # Model parameters parser.add_argument('--model', default='vit_large_patch16', type=str, metavar='MODEL', help='Name of model to train') parser.add_argument('--input_size', default=224, type=int, help='images input size') parser.add_argument('--drop_path', type=float, default=0.1, metavar='PCT', help='Drop path rate (default: 0.1)') # Optimizer parameters parser.add_argument('--clip_grad', type=float, default=None, metavar='NORM', help='Clip gradient norm (default: None, no clipping)') parser.add_argument('--weight_decay', type=float, default=0.05, help='weight decay (default: 0.05)') parser.add_argument('--lr', type=float, default=None, metavar='LR', help='learning rate (absolute lr)') parser.add_argument('--blr', type=float, default=1e-3, metavar='LR', help='base learning rate: absolute_lr = base_lr * total_batch_size / 256') parser.add_argument('--layer_decay', type=float, default=0.75, help='layer-wise lr decay from ELECTRA/BEiT') parser.add_argument('--min_lr', type=float, default=1e-6, metavar='LR', help='lower lr bound for cyclic schedulers that hit 0') parser.add_argument('--warmup_epochs', type=int, default=5, metavar='N', help='epochs to warmup LR') # Augmentation parameters parser.add_argument('--color_jitter', type=float, default=None, metavar='PCT', help='Color jitter factor (enabled only when not using Auto/RandAug)') parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME', help='Use AutoAugment policy. "v0" or "original". 
" + "(default: rand-m9-mstd0.5-inc1)'), parser.add_argument('--smoothing', type=float, default=0.1, help='Label smoothing (default: 0.1)') # * Random Erase params parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT', help='Random erase prob (default: 0.25)') parser.add_argument('--remode', type=str, default='pixel', help='Random erase mode (default: "pixel")') parser.add_argument('--recount', type=int, default=1, help='Random erase count (default: 1)') parser.add_argument('--resplit', action='store_true', default=False, help='Do not random erase first (clean) augmentation split') # * Mixup params parser.add_argument('--mixup', type=float, default=0, help='mixup alpha, mixup enabled if > 0.') parser.add_argument('--cutmix', type=float, default=0, help='cutmix alpha, cutmix enabled if > 0.') parser.add_argument('--cutmix_minmax', type=float, nargs='+', default=None, help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)') parser.add_argument('--mixup_prob', type=float, default=1.0, help='Probability of performing mixup or cutmix when either/both is enabled') parser.add_argument('--mixup_switch_prob', type=float, default=0.5, help='Probability of switching to cutmix when both mixup and cutmix enabled') parser.add_argument('--mixup_mode', type=str, default='batch', help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"') # * Finetuning params parser.add_argument('--finetune', default='', help='finetune from checkpoint') parser.add_argument('--global_pool', action='store_true') parser.set_defaults(global_pool=True) parser.add_argument('--cls_token', action='store_false', dest='global_pool', help='Use class token instead of global pool for classification') # Dataset parameters parser.add_argument('--data_path', default='/datasets01/imagenet_full_size/061417/', type=str, help='dataset path') parser.add_argument('--nb_classes', default=1000, type=int, help='number of the classification types') parser.add_argument('--output_dir', default='./output_dir', help='path where to save, empty for no saving') parser.add_argument('--log_dir', default='./output_dir', help='path where to tensorboard log') parser.add_argument('--saveckp_freq', default=20, type=int, help='Save checkpoint every x epochs.') parser.add_argument('--device', default='cuda', help='device to use for training / testing') parser.add_argument('--seed', default=0, type=int) parser.add_argument('--resume', default='', help='resume from checkpoint') parser.add_argument('--experiment', default='exp', type=str, help='experiment name (for log)') parser.add_argument('--start_epoch', default=0, type=int, metavar='N', help='start epoch') parser.add_argument('--eval', action='store_true', help='Perform evaluation only') parser.add_argument('--dist_eval', action='store_true', default=False, help='Enabling distributed evaluation (recommended during training for faster monitor') parser.add_argument('--num_workers', default=10, type=int) parser.add_argument('--pin_mem', action='store_true', help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.') parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem') parser.set_defaults(pin_mem=False) # distributed training parameters parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes') parser.add_argument('--local_rank', default=-1, type=int) parser.add_argument('--dist_on_itp', action='store_true') parser.add_argument('--dist_url', default='env://', help='url 
used to set up distributed training') parser.add_argument('--dist_backend', default='nccl', type=str, help='experiment name (for log)') return parser def main(args): misc.init_distributed_mode(args) print('job dir: {}'.format(os.path.dirname(os.path.realpath(__file__)))) print("{}".format(args).replace(', ', ',\n')) device = torch.device(args.device) # fix the seed for reproducibility seed = args.seed + misc.get_rank() torch.manual_seed(seed) np.random.seed(seed) cudnn.benchmark = True transform_train = build_transform(is_train=True, args=args) transform_val = build_transform(is_train=False, args=args) dataset_train = ImageListFolder(os.path.join(args.data_path, 'train'), transform=transform_train, ann_file=os.path.join(args.data_path, 'train.txt')) print(dataset_train) num_tasks = misc.get_world_size() global_rank = misc.get_rank() sampler_train = torch.utils.data.DistributedSampler( dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True ) print("Sampler_train = %s" % str(sampler_train)) data_loader_train = torch.utils.data.DataLoader( dataset_train, sampler=sampler_train, batch_size=args.batch_size, num_workers=args.num_workers, pin_memory=args.pin_mem, drop_last=True, ) dataset_val = ImageListFolder(os.path.join(args.data_path, 'train'), transform=transform_val, ann_file=os.path.join(args.data_path, 'train.txt')) num_tasks = misc.get_world_size() global_rank = misc.get_rank() sampler_val = torch.utils.data.DistributedSampler( dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=False ) print("Sampler_val = %s" % str(sampler_val)) data_loader_val = torch.utils.data.DataLoader( dataset_val, sampler=sampler_val, batch_size=args.batch_size, num_workers=args.num_workers, pin_memory=args.pin_mem, drop_last=False, shuffle=False, ) mixup_fn = None mixup_active = args.mixup > 0 or args.cutmix > 0. 
or args.cutmix_minmax is not None if mixup_active: print("Mixup is activated!") mixup_fn = Mixup( mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax, prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode, label_smoothing=args.smoothing, num_classes=args.nb_classes) model = models_vit.__dict__[args.model]( num_classes=args.nb_classes, drop_path_rate=args.drop_path, global_pool=args.global_pool, ) if args.finetune and not args.eval: # load pretrained model checkpoint = torch.load(args.finetune, map_location='cpu') print("Load pre-trained checkpoint from: %s" % args.finetune) if 'state_dict' in checkpoint: checkpoint_model = checkpoint['state_dict'] else: checkpoint_model = checkpoint['model'] state_dict = model.state_dict() checkpoint_model = {k.replace("module.", ""): v for k, v in checkpoint_model.items()} for k in ['head.weight', 'head.bias']: if k in checkpoint_model and checkpoint_model[k].shape != state_dict[k].shape: print(f"Removing key {k} from pretrained checkpoint") del checkpoint_model[k] # interpolate position embedding interpolate_pos_embed(model, checkpoint_model) # load pre-trained model msg = model.load_state_dict(checkpoint_model, strict=False) print(msg) print("global_pool = ", args.global_pool) if args.global_pool: assert set(msg.missing_keys) == {'head.weight', 'head.bias', 'fc_norm.weight', 'fc_norm.bias'} else: assert set(msg.missing_keys) == {'head.weight', 'head.bias'} # manually initialize fc layer trunc_normal_(model.head.weight, std=2e-5) model.to(device) model_without_ddp = model n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad) # print("Model = %s" % str(model_without_ddp)) print('number of params (M): %.2f' % (n_parameters / 1.e6)) eff_batch_size = args.batch_size * args.accum_iter * misc.get_world_size() if args.lr is None: # only base_lr is specified args.lr = args.blr * eff_batch_size / 256 print("base lr: %.2e" % (args.lr * 256 / eff_batch_size)) print("actual lr: %.2e" % args.lr) print("accumulate grad iterations: %d" % args.accum_iter) print("effective batch size: %d" % eff_batch_size) if args.distributed: model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu]) model_without_ddp = model.module # build optimizer with layer-wise lr decay (lrd) param_groups = lrd.param_groups_lrd(model_without_ddp, args.weight_decay, no_weight_decay_list=model_without_ddp.no_weight_decay(), layer_decay=args.layer_decay ) optimizer = torch.optim.AdamW(param_groups, lr=args.lr) loss_scaler = NativeScaler() if mixup_fn is not None: # smoothing is handled with mixup label transform criterion = SoftTargetCrossEntropy() elif args.smoothing > 0.: criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing) else: criterion = torch.nn.CrossEntropyLoss() print("criterion = %s" % str(criterion)) # resume model ckpt_path = os.path.join(args.output_dir, f"{args.model}.{args.experiment}.temp.pth") if not os.path.isfile(ckpt_path): print("Checkpoint not founded in {}, train from random initialization".format(ckpt_path)) else: print("Found checkpoint at {}".format(ckpt_path)) misc.load_model(args=args, ckpt_path=ckpt_path, model_without_ddp=model, optimizer=optimizer, loss_scaler=loss_scaler) if args.eval: test_stats = evaluate(data_loader_val, model, device) print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%") exit(0) if global_rank == 0 and args.log_dir is not None and not args.eval: log_dir = os.path.join(args.log_dir, 
f"{args.model}.{args.experiment}") os.makedirs(log_dir, exist_ok=True) log_writer = SummaryWriter(log_dir=log_dir) else: log_writer = None print(f"Start training for {args.epochs} epochs") start_time = time.time() max_accuracy = 0.0 for epoch in range(args.start_epoch, args.epochs): if args.distributed: data_loader_train.sampler.set_epoch(epoch)
train_stats = train_one_epoch(
5
2023-12-10 15:17:11+00:00
8k
boweniac/autogan
autogan/utils/compressed_text_utils.py
[ { "identifier": "LLMConfig", "path": "autogan/oai/config_utils.py", "snippet": "class LLMConfig:\n \"\"\"LLM config object\n \"\"\"\n\n def __init__(\n self,\n api_key_list: ConfigList,\n max_messages_tokens: str,\n request_interval_time: int,\n request_timeout: int,\n max_retries: int\n ):\n self._api_key_list = api_key_list\n self._max_messages_tokens = max_messages_tokens\n self._request_interval_time = request_interval_time\n self._request_timeout = request_timeout\n self._max_retries = max_retries\n\n def api_key(self, index):\n \"\"\"Get the one configuration in the api_key_list.\n \"\"\"\n return self._api_key_list.get_config(index)\n\n @property\n def next_api_key(self):\n \"\"\"Get the next configuration in the api_key_list.\n \"\"\"\n return self._api_key_list.get_next_config\n\n @property\n def len_of_api_key_list(self) -> int:\n \"\"\"Get the first configuration in the api_key_list list.\n \"\"\"\n return self._api_key_list.len\n\n @property\n def model(self):\n \"\"\"Get the model of the first configuration in the api_key_list list.\n \"\"\"\n return self._api_key_list.get_first_config[\"model\"]\n\n @property\n def max_messages_tokens(self):\n \"\"\"Limit the maximum tokens of the context in each dialogue.\n \"\"\"\n return self._max_messages_tokens\n\n @property\n def request_interval_time(self):\n return self._request_interval_time\n\n @property\n def request_timeout(self):\n return self._request_timeout\n\n @property\n def max_retries(self):\n return self._max_retries" }, { "identifier": "count_text_tokens", "path": "autogan/oai/count_tokens_utils.py", "snippet": "def count_text_tokens(text: str, model: Optional[str] = \"gpt-3.5-turbo\") -> int:\n \"\"\"Calculate the tokens of the text.\n\n :param text: The text to be tokenized\n :param model: Calculate tokens for a specific model. 
If the model is not listed, it will default to calculating the number of tokens based on the gpt-3.5-turbo standard.\n\n :return: tokens\n \"\"\"\n\n if not text:\n return 0\n\n model_list = ['gpt-4', 'gpt-3.5-turbo-16k', 'gpt-3.5-turbo']\n if model not in model_list:\n model = \"gpt-3.5-turbo\"\n\n try:\n encoding = tiktoken.encoding_for_model(model)\n num_tokens = len(encoding.encode(text))\n except Exception as e:\n print(e)\n num_tokens = 0\n\n return num_tokens" }, { "identifier": "environment_info", "path": "autogan/utils/environment_utils.py", "snippet": "def environment_info() -> str:\n \"\"\"Current environment information\n\n :return: --current_time: Y.m.d H:M:S week:%w\n \"\"\"\n info = f'current time: {get_time()}'\n\n return info" }, { "identifier": "generate_chat_completion", "path": "autogan/oai/generate_utils.py", "snippet": "def generate_chat_completion(llm_config: LLMConfig, messages: List, agent_name: str, gen: str,\n response_func: ResponseFuncType, stream_mode: Optional[bool] = None)\\\n -> tuple[Optional[str], Optional[int]]:\n \"\"\"Call the LLM interface\n\n Currently, only the chatgpt model of openai (including azure) is adapted.\n\n :param llm_config: LLM configuration.\n :param messages:\n :param agent_name:\n :param gen: Used to distinguish agent replies, deep thoughts, context compression, general summaries, clue summaries\n - main: agent replies\n - idea: deep thoughts\n - messages_summary: context compression\n - text_summary: general summaries\n - clue_summary: clue summaries\n :param response_func: Used to return results to the interface or terminal.\n :param stream_mode:\n \"\"\"\n\n # When a certain configuration in the configuration list fails to request,\n # continue to try the next configuration until all configurations in the list are attempted.\n loop = llm_config.len_of_api_key_list\n for i in range(loop):\n time.sleep(llm_config.request_interval_time)\n api_key = llm_config.next_api_key\n try:\n completion_content = \"\"\n completion_tokens = 0\n index = 1\n for message in chat_completions(messages, api_key, llm_config.request_timeout,\n llm_config.max_retries, stream_mode):\n content = \"\"\n if stream_mode:\n if (message and \"choices\" in message and \"delta\" in message[\"choices\"][0]\n and \"content\" in message[\"choices\"][0][\"delta\"]\n and message[\"choices\"][0][\"delta\"][\"content\"]):\n content = message[\"choices\"][0][\"delta\"][\"content\"]\n completion_content += content\n else:\n if (message and \"choices\" in message and \"message\" in message[\"choices\"][0]\n and \"content\" in message[\"choices\"][0][\"message\"]\n and message[\"choices\"][0][\"message\"][\"content\"]):\n content = message[\"choices\"][0][\"message\"][\"content\"]\n completion_content = content\n if message and \"usage\" in message and \"completion_tokens\" in message[\"usage\"]:\n completion_tokens = message[\"usage\"][\"completion_tokens\"]\n response_func(agent_name, gen, api_key[\"model\"], stream_mode, index, content, completion_tokens, message)\n if content:\n index += 1\n\n if completion_content:\n if completion_tokens == 0:\n completion_tokens = count_text_tokens(completion_content, api_key['model'])\n return completion_content, completion_tokens\n else:\n raise ValueError(\"The return value is empty.\")\n except Exception as e:\n if i == loop - 1:\n print(f\"generate_chat_completion Exception: {e}\")\n return None, None" }, { "identifier": "ResponseFuncType", "path": "autogan/utils/response.py", "snippet": " def colored(x, *args, **kwargs):\ndef 
default_response_func(agent_name: str, gen: str, model: str, stream_mode: bool, index: int,\n content: Optional[str], tokens: Optional[int], response: any):\ndef obj_to_dict(obj):" } ]
import math
import re
from typing import Optional, List

from autogan.oai.config_utils import LLMConfig
from autogan.oai.count_tokens_utils import count_text_tokens
from autogan.utils.environment_utils import environment_info
from autogan.oai.generate_utils import generate_chat_completion
from autogan.utils.response import ResponseFuncType
4,571
--total_tokens: Total tokens after compression. 压缩后的整体tokens。 """ compressed_text = "" total_tokens = 0 split_texts = split_text(text, summary_model_config.max_messages_tokens, summary_model_config.model) # Calculate the approximate size of the text slices proportionally split_safe_size = int(safe_size / len(split_texts)) for st in split_texts: content, tokens = generate_text_summary(st, summary_model_config, agent_name, response_func, stream_mode, split_safe_size) if content: compressed_text += content + "\n" total_tokens += tokens if compressed_text: return compressed_text, total_tokens else: return None, None def generate_text_summary(text: str, summary_model_config: LLMConfig, agent_name: str, response_func: ResponseFuncType, stream_mode: Optional[bool] = None, safe_size: Optional[int] = None) \ -> tuple[str, int]: """Generate a general summary of the text 生成文本普通摘要 :param text: Text to be compressed. 待压缩的文本。 :param summary_model_config: LLM configuration used for text compression. 用于压缩文本的 LLM 配置。 :param agent_name: :param response_func: Used to return results to the interface or terminal. 用于向接口或终端返回结果 :param stream_mode: :param safe_size: The target size of the text after compression, if not provided there is no limit. 文本压缩后的目标尺寸,如果为空则不做限制。 :return: --compressed_text: The text after compression. 压缩后的文本。 --total_tokens: Total tokens after compression. 压缩后的整体tokens。 """ if safe_size: system_prompt = """I hope you are an article filter and refiner, filtering and refining the articles sent by users. Please ensure that your summary does not exceed the limit of max_tokens. When the content of the article is not enough to refine, please omit other polite language and only output one word: None. If the article can be refined, please help me filter out sensitive content related to politics, geopolitics, violence, and sex from the article, and extract the main content from the article. Please note that the description perspective and chapter structure of the extracted content should be as consistent as possible with the original text, and try to retain details for subsequent reasoning. Please omit other polite language and only output the refined content.""" chat_prompt = f"max_tokens: {safe_size}\n\nArticle content:\n{text}" # system_prompt = """我希望你是一个文章过滤与提炼器,过滤和提炼用户发送的文章,请确保您的总结不超过 max_tokens 的限制. # 当文章内容不足以提炼时,请省略其他客套用语,仅输出一个单词:None。 # 如果文章可以精炼请帮我滤掉文章中与政治、地缘政治、暴力、性等有关的敏感内容,并从文章中提炼出主要内容. # 注意提炼出的内容其描述视角和章节结构尽量与原文一致,并尽可能的保留细节以用于后续推理,请省略其他客套用语,仅输出提炼好的内容。""" # chat_prompt = f"max_tokens: {safe_size}\n\n文章内容:\n\n{text}" else: system_prompt = """I hope you can serve as an article filter and refiner, filtering and refining the articles sent by users. If the content of the article is insufficient for refinement, please omit other polite phrases and output only one word: None. If the article can be refined, please help me filter out sensitive content related to politics, geopolitics, violence, and sex from the article, and extract the main content from the article. Please note that the perspective and chapter structure of the extracted content should be as consistent with the original as possible, and retain as many details as possible for subsequent reasoning. 
Please omit other polite phrases and only output the refined content.""" chat_prompt = f"Article content:\n{text}" # system_prompt = """我希望你是一个文章过滤与提炼器,过滤和提炼用户发送的文章。当文章内容不足以提炼时,请省略其他客套用语,仅输出一个单词:None。 # 如果文章可以精炼请帮我滤掉文章中与政治、地缘政治、暴力、性等有关的敏感内容,并从文章中提炼出主要内容。 # 注意提炼出的内容其描述视角和章节结构尽量与原文一致,并尽可能的保留细节以用于后续推理。请省略其他客套用语,仅输出提炼好的内容。""" # chat_prompt = f"文章内容:\n{text}" chat_messages = [{'role': 'system', 'content': system_prompt}, {'role': 'user', 'content': chat_prompt}] return generate_chat_completion(summary_model_config, chat_messages, agent_name, "text_summary", response_func, stream_mode) def generate_text_clues(text: str, focus: str, summary_model_config: LLMConfig, agent_name: str, response_func: ResponseFuncType, stream_mode: Optional[bool] = None) -> tuple[str, int]: """Generate a clue summary of the text 生成文本线索摘要 :param text: Text to be compressed. 待压缩的文本。 :param focus: The focus direction when compressing text. 压缩文本时的专注方向。 :param summary_model_config: LLM configuration used for text compression. 用于压缩文本的 LLM 配置。 :param agent_name: :param response_func: Used to return results to the interface or terminal. 用于向接口或终端返回结果 :param stream_mode: :return: --compressed_text: The text after compression. 压缩后的文本。 --total_tokens: Total tokens after compression. 压缩后的整体tokens。 """ info = environment_info() system_prompt = """I hope you are an agent who is good at discovering the truth in real-time, capable of finding content that helps infer the answer to the question from the information sent by users. Please note that if the content of the information has no extractable value, please omit other polite expressions and output only one word: None. Also, please help me filter out sensitive content related to politics, geopolitics, violence, and sex in the information.""" # system_prompt = """我希望你是一个善于发现实时真相的探员, 能从用户发送的资料中帮我找到有助于推断出问题答案的内容。 # 需要注意的是,如果资料内容没有可提取的价值,请省略其他客套用语,仅输出一个单词:None。另外还请帮我过滤掉资料中与政治、地缘政治、暴力、性等有关的敏感内容。""" chat_messages = [{'role': 'system', 'content': system_prompt}, {'role': 'user', 'content': f'The current question is:{focus}\n\nEnvironmental information:\n{info}\n\nMaterial content:\n\n{text}'}] # chat_messages = [{'role': 'user', 'content': f'当前的问题是:{focus}\n\n环境信息:\n{info}\n\n资料内容:\n\n{text}'}] return generate_chat_completion(summary_model_config, chat_messages, agent_name, "clue_summary", response_func, stream_mode) def split_text(text: str, split_size: int, model: Optional[str] = None) -> List[str]: """Split the long text and store the text slices in a list 将长文本拆分,并将文本切片存储至列表 """ split_texts = []
def compressed_text_universal(text: str, summary_model_config: LLMConfig, agent_name: str, response_func: ResponseFuncType, stream_mode: Optional[bool] = None, focus: Optional[str] = None, safe_size: Optional[int] = None) \ -> tuple[Optional[str], Optional[int]]: """Compress the text, generating either a regular summary or a cue summary. 压缩文本,可生成普通摘要或线索摘要。 First, the long text is sliced, and then a summary is generated for each slice. 首先将长文本切片,然后逐切片的生成摘要。 If the value of the focus parameter is not None, then the attention will be focused on the focus area while generating the summary. 如 focus 参数的值不为 None 则在生成摘要时注意力集中于 focus。 If the value of the safe_size parameter is not None and the length of the initial compression result exceeds the safe_size, the summary will be further compressed, with the compressed size expected to stay within the range of the safe_size. 如 safe_size 参数的值不为 None 且初次压缩结果长度超过 safe_size,则会对摘要进一步压缩,压缩后的大小被期望保持在 safe_size 范围之内。 :param text: Text to be compressed. 待压缩的文本。 :param summary_model_config: LLM configuration used for text compression. 用于压缩文本的 LLM 配置。 :param agent_name: :param response_func: Used to return results to the interface or terminal. 用于向接口或终端返回结果 :param stream_mode: :param focus: The focus direction when compressing text. 压缩文本时的专注方向。 :param safe_size: The target size of the text after compression, if not provided there is no limit. 文本压缩后的目标尺寸,如果为空则不做限制。 :return: --compressed_text: The text after compression. 压缩后的文本。 --total_tokens: Total tokens after compression. 压缩后的整体tokens。 """ compressed_text = "" total_tokens = 0 split_texts = split_text(text, summary_model_config.max_messages_tokens, summary_model_config.model) for st in split_texts: if focus: content, tokens = generate_text_clues(st, focus, summary_model_config, agent_name, response_func, stream_mode) else: content, tokens = generate_text_summary(st, summary_model_config, agent_name, response_func, stream_mode) if content: compressed_text += content + "\n" total_tokens += tokens if compressed_text: if safe_size and safe_size < total_tokens: return compressed_text_into_safe_size(compressed_text, safe_size, summary_model_config, agent_name, response_func, stream_mode) else: return compressed_text, total_tokens else: return None, None def compressed_text_into_safe_size(text: str, safe_size: int, summary_model_config: LLMConfig, agent_name: str, response_func: ResponseFuncType, stream_mode: Optional[bool] = None) \ -> tuple[Optional[str], Optional[int]]: """Compress the text to a safe size 压缩文本至安全尺寸 First, the long text is sliced, and then a summary is generated for each slice. 首先将长文本切片,然后逐切片的生成摘要。 the summary will be further compressed, with the compressed size expected to stay within the range of the safe_size. 压缩后的大小被期望保持在 safe_size 范围之内。 :param text: Text to be compressed. 待压缩的文本。 :param safe_size: The target size of the text after compression. 文本压缩后的目标尺寸。 :param summary_model_config: LLM configuration used for text compression. 用于压缩文本的 LLM 配置。 :param agent_name: :param response_func: Used to return results to the interface or terminal. 用于向接口或终端返回结果 :param stream_mode: :return: --compressed_text: The text after compression. 压缩后的文本。 --total_tokens: Total tokens after compression. 
压缩后的整体tokens。 """ compressed_text = "" total_tokens = 0 split_texts = split_text(text, summary_model_config.max_messages_tokens, summary_model_config.model) # Calculate the approximate size of the text slices proportionally split_safe_size = int(safe_size / len(split_texts)) for st in split_texts: content, tokens = generate_text_summary(st, summary_model_config, agent_name, response_func, stream_mode, split_safe_size) if content: compressed_text += content + "\n" total_tokens += tokens if compressed_text: return compressed_text, total_tokens else: return None, None def generate_text_summary(text: str, summary_model_config: LLMConfig, agent_name: str, response_func: ResponseFuncType, stream_mode: Optional[bool] = None, safe_size: Optional[int] = None) \ -> tuple[str, int]: """Generate a general summary of the text 生成文本普通摘要 :param text: Text to be compressed. 待压缩的文本。 :param summary_model_config: LLM configuration used for text compression. 用于压缩文本的 LLM 配置。 :param agent_name: :param response_func: Used to return results to the interface or terminal. 用于向接口或终端返回结果 :param stream_mode: :param safe_size: The target size of the text after compression, if not provided there is no limit. 文本压缩后的目标尺寸,如果为空则不做限制。 :return: --compressed_text: The text after compression. 压缩后的文本。 --total_tokens: Total tokens after compression. 压缩后的整体tokens。 """ if safe_size: system_prompt = """I hope you are an article filter and refiner, filtering and refining the articles sent by users. Please ensure that your summary does not exceed the limit of max_tokens. When the content of the article is not enough to refine, please omit other polite language and only output one word: None. If the article can be refined, please help me filter out sensitive content related to politics, geopolitics, violence, and sex from the article, and extract the main content from the article. Please note that the description perspective and chapter structure of the extracted content should be as consistent as possible with the original text, and try to retain details for subsequent reasoning. Please omit other polite language and only output the refined content.""" chat_prompt = f"max_tokens: {safe_size}\n\nArticle content:\n{text}" # system_prompt = """我希望你是一个文章过滤与提炼器,过滤和提炼用户发送的文章,请确保您的总结不超过 max_tokens 的限制. # 当文章内容不足以提炼时,请省略其他客套用语,仅输出一个单词:None。 # 如果文章可以精炼请帮我滤掉文章中与政治、地缘政治、暴力、性等有关的敏感内容,并从文章中提炼出主要内容. # 注意提炼出的内容其描述视角和章节结构尽量与原文一致,并尽可能的保留细节以用于后续推理,请省略其他客套用语,仅输出提炼好的内容。""" # chat_prompt = f"max_tokens: {safe_size}\n\n文章内容:\n\n{text}" else: system_prompt = """I hope you can serve as an article filter and refiner, filtering and refining the articles sent by users. If the content of the article is insufficient for refinement, please omit other polite phrases and output only one word: None. If the article can be refined, please help me filter out sensitive content related to politics, geopolitics, violence, and sex from the article, and extract the main content from the article. Please note that the perspective and chapter structure of the extracted content should be as consistent with the original as possible, and retain as many details as possible for subsequent reasoning. 
Please omit other polite phrases and only output the refined content.""" chat_prompt = f"Article content:\n{text}" # system_prompt = """我希望你是一个文章过滤与提炼器,过滤和提炼用户发送的文章。当文章内容不足以提炼时,请省略其他客套用语,仅输出一个单词:None。 # 如果文章可以精炼请帮我滤掉文章中与政治、地缘政治、暴力、性等有关的敏感内容,并从文章中提炼出主要内容。 # 注意提炼出的内容其描述视角和章节结构尽量与原文一致,并尽可能的保留细节以用于后续推理。请省略其他客套用语,仅输出提炼好的内容。""" # chat_prompt = f"文章内容:\n{text}" chat_messages = [{'role': 'system', 'content': system_prompt}, {'role': 'user', 'content': chat_prompt}] return generate_chat_completion(summary_model_config, chat_messages, agent_name, "text_summary", response_func, stream_mode) def generate_text_clues(text: str, focus: str, summary_model_config: LLMConfig, agent_name: str, response_func: ResponseFuncType, stream_mode: Optional[bool] = None) -> tuple[str, int]: """Generate a clue summary of the text 生成文本线索摘要 :param text: Text to be compressed. 待压缩的文本。 :param focus: The focus direction when compressing text. 压缩文本时的专注方向。 :param summary_model_config: LLM configuration used for text compression. 用于压缩文本的 LLM 配置。 :param agent_name: :param response_func: Used to return results to the interface or terminal. 用于向接口或终端返回结果 :param stream_mode: :return: --compressed_text: The text after compression. 压缩后的文本。 --total_tokens: Total tokens after compression. 压缩后的整体tokens。 """ info = environment_info() system_prompt = """I hope you are an agent who is good at discovering the truth in real-time, capable of finding content that helps infer the answer to the question from the information sent by users. Please note that if the content of the information has no extractable value, please omit other polite expressions and output only one word: None. Also, please help me filter out sensitive content related to politics, geopolitics, violence, and sex in the information.""" # system_prompt = """我希望你是一个善于发现实时真相的探员, 能从用户发送的资料中帮我找到有助于推断出问题答案的内容。 # 需要注意的是,如果资料内容没有可提取的价值,请省略其他客套用语,仅输出一个单词:None。另外还请帮我过滤掉资料中与政治、地缘政治、暴力、性等有关的敏感内容。""" chat_messages = [{'role': 'system', 'content': system_prompt}, {'role': 'user', 'content': f'The current question is:{focus}\n\nEnvironmental information:\n{info}\n\nMaterial content:\n\n{text}'}] # chat_messages = [{'role': 'user', 'content': f'当前的问题是:{focus}\n\n环境信息:\n{info}\n\n资料内容:\n\n{text}'}] return generate_chat_completion(summary_model_config, chat_messages, agent_name, "clue_summary", response_func, stream_mode) def split_text(text: str, split_size: int, model: Optional[str] = None) -> List[str]: """Split the long text and store the text slices in a list 将长文本拆分,并将文本切片存储至列表 """ split_texts = []
count_tokens = count_text_tokens(text, model)
1
2023-12-06 03:24:34+00:00
8k
JingHao99/IDR-Ingredients-oriented-Degradation-Reformulation
inference.py
[ { "identifier": "AverageMeter", "path": "utils/metric_util.py", "snippet": "class AverageMeter():\r\n \"\"\" Computes and stores the average and current value \"\"\"\r\n\r\n def __init__(self):\r\n self.reset()\r\n\r\n def reset(self):\r\n \"\"\" Reset all statistics \"\"\"\r\n self.val = 0\r\n self.avg = 0\r\n self.sum = 0\r\n self.count = 0\r\n\r\n def update(self, val, n=1):\r\n \"\"\" Update statistics \"\"\"\r\n self.val = val\r\n self.sum += val * n\r\n self.count += n\r\n self.avg = self.sum / self.count\r" }, { "identifier": "save_img_tensor", "path": "utils/tensor_op.py", "snippet": "def save_img_tensor(restored,result_dir,ippath):\r\n '''\r\n :param restored: (1,C,H,W)\r\n :param result_dir:\r\n :param ippath:\r\n :return:\r\n '''\r\n restored = torch.clamp(restored, 0, 1).cpu().detach().permute(0, 2, 3, 1).squeeze(0).numpy()\r\n util.save_img(img_as_ubyte(restored),util.Generate_rp(result_dir,ippath))\r" }, { "identifier": "save_image_tensor", "path": "utils/tensor_op.py", "snippet": "def save_image_tensor(image_tensor, output_path=\"output/\"):\r\n image_np = torch_to_np(image_tensor)\r\n p = np_to_pil(image_np)\r\n p.save(output_path)\r" }, { "identifier": "mkdir", "path": "utils/util.py", "snippet": "def mkdir(path):\r\n if not os.path.exists(path):\r\n os.makedirs(path)\r" }, { "identifier": "setup_logger", "path": "utils/util.py", "snippet": "def setup_logger(logger_name, root, phase, level=logging.INFO, screen=False, tofile=False):\r\n '''\r\n util.setup_logger('base', opt['path']['log'], 'train_' + opt['name'], level=logging.INFO,\r\n screen=True, tofile=True)\r\n logger = logging.getLogger('base')\r\n logger.info(option.dict2str(opt))\r\n '''\r\n lg = logging.getLogger(logger_name)\r\n fmt = '%(asctime)s.%(msecs)03d - %(levelname)s: %(message)s'\r\n color_fmt = colored('%(asctime)s.%(msecs)03d','green') + '- %(levelname)s: %(message)s'\r\n formatter = logging.Formatter(fmt=color_fmt,\r\n datefmt='%y-%m-%d %H:%M:%S')\r\n lg.setLevel(level)\r\n lg.propagate = False\r\n if tofile:\r\n log_file = os.path.join(root, phase + '_{}.log'.format(get_timestamp()))\r\n fh = logging.FileHandler(log_file, mode='w')\r\n fh.setFormatter(formatter)\r\n lg.addHandler(fh)\r\n if screen:\r\n sh = logging.StreamHandler()\r\n sh.setFormatter(formatter)\r\n lg.addHandler(sh)\r" }, { "identifier": "crop_HWC_img", "path": "utils/data_util.py", "snippet": "def crop_HWC_img(image, base=64):\r\n \"\"\"\r\n 裁切到multiple of base的size上\r\n :param image: H,W,C\r\n :param base: (int)\r\n :return:\r\n \"\"\"\r\n h = image.shape[0]\r\n w = image.shape[1]\r\n crop_h = h % base\r\n crop_w = w % base\r\n return image[crop_h // 2:h - crop_h + crop_h // 2, crop_w // 2:w - crop_w + crop_w // 2, :]\r" }, { "identifier": "random_augmentation", "path": "utils/data_util.py", "snippet": "def random_augmentation(*args):\r\n out = []\r\n flag_aug = random.randint(0,7)\r\n for data in args:\r\n out.append(data_augmentation(data, flag_aug).copy())\r\n return out\r" }, { "identifier": "tensor2img", "path": "utils/data_util.py", "snippet": "def tensor2img(tensor, rgb2bgr=True, out_type=np.uint8, min_max=(0, 1)):\r\n \"\"\"Convert torch Tensors into image numpy arrays.\r\n\r\n After clamping to [min, max], values will be normalized to [0, 1].\r\n\r\n Args:\r\n tensor (Tensor or list[Tensor]): Accept shapes:\r\n 1) 4D mini-batch Tensor of shape (B x 3/1 x H x W);\r\n 2) 3D Tensor of shape (3/1 x H x W);\r\n 3) 2D Tensor of shape (H x W).\r\n Tensor channel should be in RGB order.\r\n rgb2bgr (bool): Whether to change rgb 
to bgr.\r\n out_type (numpy type): output types. If ``np.uint8``, transform outputs\r\n to uint8 type with range [0, 255]; otherwise, float type with\r\n range [0, 1]. Default: ``np.uint8``.\r\n min_max (tuple[int]): min and max values for clamp.\r\n\r\n Returns:\r\n (Tensor or list): 3D ndarray of shape (H x W x C) OR 2D ndarray of\r\n shape (H x W). The channel order is BGR.\r\n \"\"\"\r\n if not (torch.is_tensor(tensor) or\r\n (isinstance(tensor, list)\r\n and all(torch.is_tensor(t) for t in tensor))):\r\n raise TypeError(\r\n f'tensor or list of tensors expected, got {type(tensor)}')\r\n\r\n if torch.is_tensor(tensor):\r\n tensor = [tensor]\r\n result = []\r\n for _tensor in tensor:\r\n _tensor = _tensor.squeeze(0).float().detach().cpu().clamp_(*min_max)\r\n _tensor = (_tensor - min_max[0]) / (min_max[1] - min_max[0])\r\n\r\n n_dim = _tensor.dim()\r\n if n_dim == 4:\r\n img_np = make_grid(_tensor, nrow=int(math.sqrt(_tensor.size(0))), normalize=False).numpy()\r\n img_np = img_np.transpose(1, 2, 0)\r\n if rgb2bgr:\r\n img_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)\r\n elif n_dim == 3:\r\n img_np = _tensor.numpy()\r\n img_np = img_np.transpose(1, 2, 0)\r\n if img_np.shape[2] == 1: # gray image\r\n img_np = np.squeeze(img_np, axis=2)\r\n else:\r\n if rgb2bgr:\r\n img_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)\r\n elif n_dim == 2:\r\n img_np = _tensor.numpy()\r\n else:\r\n raise TypeError('Only support 4D, 3D or 2D tensor. '\r\n f'But received with dimension: {n_dim}')\r\n if out_type == np.uint8:\r\n # Unlike MATLAB, numpy.unit8() WILL NOT round by default.\r\n img_np = (img_np * 255.0).round()\r\n img_np = img_np.astype(out_type)\r\n result.append(img_np)\r\n if len(result) == 1:\r\n result = result[0]\r\n return result\r" }, { "identifier": "compute_psnr_ssim", "path": "metrics/psnr_ssim.py", "snippet": "def compute_psnr_ssim(recoverd, clean):\r\n \"\"\"\r\n model.output输入\r\n \"\"\"\r\n assert recoverd.shape == clean.shape\r\n recoverd = np.clip(recoverd.detach().cpu().numpy(), 0, 1)\r\n clean = np.clip(clean.detach().cpu().numpy(), 0, 1)\r\n\r\n recoverd = recoverd.transpose(0, 2, 3, 1)\r\n clean = clean.transpose(0, 2, 3, 1)\r\n psnr = 0\r\n ssim = 0\r\n\r\n for i in range(recoverd.shape[0]):\r\n # psnr_val += compare_psnr(clean[i], recoverd[i])\r\n # ssim += compare_ssim(clean[i], recoverd[i], multichannel=True)\r\n psnr += peak_signal_noise_ratio(clean[i], recoverd[i], data_range=1)\r\n ssim += structural_similarity(clean[i], recoverd[i], data_range=1, multichannel=True)\r\n\r\n return psnr / recoverd.shape[0], ssim / recoverd.shape[0], recoverd.shape[0]\r" }, { "identifier": "calculate_psnr", "path": "metrics/psnr_ssim.py", "snippet": "def calculate_psnr(img1, img2, crop_border=0, test_y_channel=False):\r\n \"\"\"img1 and img2 have range [0, 255] np.uint8\r\n tensor2img后输入\r\n crop_border (int): Cropped pixels in each edge of an image. These\r\n pixels are not involved in the PSNR calculation.\r\n test_y_channel (bool): Test on Y channel of YCbCr. 
Default: False.\r\n\r\n Returns:\r\n float: psnr result.\r\n \"\"\"\r\n img1 = img1.astype(np.float64)\r\n img2 = img2.astype(np.float64)\r\n if crop_border != 0:\r\n img1 = img1[crop_border:-crop_border, crop_border:-crop_border, ...]\r\n img2 = img2[crop_border:-crop_border, crop_border:-crop_border, ...]\r\n if test_y_channel:\r\n img1 = to_y_channel(img1)\r\n img2 = to_y_channel(img2)\r\n\r\n mse = np.mean((img1 - img2)**2)\r\n if mse == 0:\r\n return float('inf')\r\n return 20 * math.log10(255.0 / math.sqrt(mse))\r" }, { "identifier": "calculate_ssim", "path": "metrics/psnr_ssim.py", "snippet": "def calculate_ssim(img1, img2):\r\n '''calculate SSIM\r\n the same outputs as MATLAB's\r\n img1, img2: [0, 255]\r\n '''\r\n if not img1.shape == img2.shape:\r\n raise ValueError('Input images must have the same dimensions.')\r\n if img1.ndim == 2:\r\n return ssim(img1, img2)\r\n elif img1.ndim == 3:\r\n if img1.shape[2] == 3:\r\n ssims = []\r\n for i in range(3):\r\n ssims.append(ssim(img1, img2))\r\n return np.array(ssims).mean()\r\n elif img1.shape[2] == 1:\r\n return ssim(np.squeeze(img1), np.squeeze(img2))\r\n else:\r\n raise ValueError('Wrong input image dimensions.')\r" }, { "identifier": "IDR_restormer", "path": "models/archs/IDR_restormer_arch.py", "snippet": "class IDR_restormer(nn.Module):\n def __init__(self,\n inp_channels=3,\n out_channels=3,\n dim=48,\n num_blocks=[4, 6, 6, 8],\n num_refinement_blocks=4,\n heads=[1, 2, 4, 8],\n ffn_expansion_factor=2.66,\n bias=False,\n LayerNorm_type='WithBias', ## Other option 'BiasFree'\n num_degra_queries = 24,\n keep_degra = 48,\n degra_type = 5,\n sam = True,\n ops_type = 5,\n pred = True\n ):\n super(IDR_restormer, self).__init__()\n\n self.de_dict = {'denoise': 0, 'denoise_15': 0, 'denoise_25': 0, 'denoise_50': 0, 'derain': 1, 'dehaze': 2, 'deblur': 3, 'delowlight': 4, 'clean': 5}\n\n self.patch_embed =OverlapPatchEmbed_Keep(inp_channels, dim)\n\n self.encoder_level1 = nn.Sequential(*[\n MDTA_TransformerBlock(dim=dim, num_heads=heads[0], ffn_expansion_factor=ffn_expansion_factor, bias=bias,\n LayerNorm_type=LayerNorm_type) for i in range(num_blocks[0])])\n\n self.down1_2 = Downsample(dim) ## From Level 1 to Level 2\n self.encoder_level2 = nn.Sequential(*[\n MDTA_TransformerBlock(dim=int(dim * 2 ** 1), num_heads=heads[1], ffn_expansion_factor=ffn_expansion_factor,\n bias=bias, LayerNorm_type=LayerNorm_type) for i in range(num_blocks[1])])\n\n self.down2_3 = Downsample(int(dim * 2 ** 1)) ## From Level 2 to Level 3\n self.encoder_level3 = nn.Sequential(*[\n MDTA_TransformerBlock(dim=int(dim * 2 ** 2), num_heads=heads[2], ffn_expansion_factor=ffn_expansion_factor,\n bias=bias, LayerNorm_type=LayerNorm_type) for i in range(num_blocks[2])])\n\n self.down3_4 = Downsample(int(dim * 2 ** 2)) ## From Level 3 to Level 4\n self.latent = nn.Sequential(*[\n MDTA_TransformerBlock(dim=int(dim * 2 ** 3), num_heads=heads[3], ffn_expansion_factor=ffn_expansion_factor,\n bias=bias, LayerNorm_type=LayerNorm_type) for i in range(num_blocks[3])])\n\n self.up4_3 = Upsample(int(dim * 2 ** 3)) ## From Level 4 to Level 3\n self.reduce_chan_level3 = nn.Conv2d(int(dim * 2 ** 3), int(dim * 2 ** 2), kernel_size=1, bias=bias)\n self.decoder_level3 = nn.Sequential(*[\n MDTA_TransformerBlock(dim=int(dim * 2 ** 2), num_heads=heads[2], ffn_expansion_factor=ffn_expansion_factor,\n bias=bias, LayerNorm_type=LayerNorm_type) for i in range(num_blocks[2])])\n\n self.up3_2 = Upsample(int(dim * 2 ** 2)) ## From Level 3 to Level 2\n self.reduce_chan_level2 = nn.Conv2d(int(dim * 2 
** 2), int(dim * 2 ** 1), kernel_size=1, bias=bias)\n self.decoder_level2 = nn.Sequential(*[\n MDTA_TransformerBlock(dim=int(dim * 2 ** 1), num_heads=heads[1], ffn_expansion_factor=ffn_expansion_factor,\n bias=bias, LayerNorm_type=LayerNorm_type) for i in range(num_blocks[1])])\n\n self.up2_1 = Upsample(int(dim * 2 ** 1)) ## From Level 2 to Level 1 (NO 1x1 conv to reduce channels)\n\n self.decoder_level1 = nn.Sequential(*[\n MDTA_TransformerBlock(dim=int(dim * 2 ** 1), num_heads=heads[0], ffn_expansion_factor=ffn_expansion_factor,\n bias=bias, LayerNorm_type=LayerNorm_type) for i in range(num_blocks[0])])\n\n self.refinement = nn.Sequential(*[\n MDTA_TransformerBlock(dim=int(dim * 2 ** 1), num_heads=heads[0], ffn_expansion_factor=ffn_expansion_factor,\n bias=bias, LayerNorm_type=LayerNorm_type) for i in range(num_refinement_blocks)])\n\n self.output = nn.Conv2d(int(dim * 2 ** 1), out_channels, kernel_size=3, stride=1, padding=1, bias=bias)\n\n self.degra_key = nn.Parameter(torch.randn(degra_type, num_degra_queries, int(dim * 2 ** 3)), requires_grad=True)\n self.dmixer = PI_MLP_Mixer(dim=int(dim * 2 ** 3),num_degra=num_degra_queries*degra_type,keep_degra=keep_degra,init='pca')\n self.kdp_level1 = Key_TransformerBlock(dim=dim, dimkey=int(dim * 2 ** 3), num_heads=heads[0], ffn_expansion_factor=2.66, bias=bias, LayerNorm_type=LayerNorm_type,principle=True, sam=sam, ops_type=ops_type,pred=pred)\n self.kdp_level2 = Key_TransformerBlock(dim=int(dim * 2 ** 1), dimkey=int(dim * 2 ** 3), num_heads=heads[1], ffn_expansion_factor=2.66, bias=bias, LayerNorm_type=LayerNorm_type,principle=True, sam=sam, ops_type=ops_type,pred=pred)\n self.kdp_level3 = Key_TransformerBlock(dim=int(dim * 2 ** 2), dimkey=int(dim * 2 ** 3), num_heads=heads[2], ffn_expansion_factor=2.66, bias=bias, LayerNorm_type=LayerNorm_type,principle=True, sam=sam, ops_type=ops_type,pred=pred)\n self.cri_pix = nn.L1Loss().cuda()\n\n\n\n def forward(self, inp_img, degra_type=None, gt=None, epoch=None):\n \"\"\"\n only input_image is required during inference\n \"\"\"\n flag=0\n batch_size,c,h,w = inp_img.shape\n if epoch and epoch <= 550:\n # stage 1 training - Task-oriented knowledge collection\n de_type = degra_type[0]\n degra_id = self.de_dict[de_type]\n degra_key = self.degra_key[degra_id,:,:].unsqueeze(0).expand(batch_size,-1,-1)\n else:\n # stage 2 training - Ingredients-oriented knowedge intergation\n if flag==0:\n U,S,V = process_USV(self.degra_key.detach())\n flag=1\n U,V = self.dmixer(U,V,batch_size)\n degra_key = [U,S,V]\n de_type = None\n\n\n inp_enc_level1 = self.patch_embed(inp_img)\n out_enc_level1 = self.encoder_level1(inp_enc_level1)\n torch_resize1 = Resize([out_enc_level1.shape[2],out_enc_level1.shape[3]])\n inp_img1 = torch_resize1(inp_img)\n out_enc_level1,output_img1,pred1 = self.kdp_level1(out_enc_level1,degra_key,inp_img1,degra_type=de_type)\n\n inp_enc_level2 = self.down1_2(out_enc_level1)\n out_enc_level2 = self.encoder_level2(inp_enc_level2)\n torch_resize2 = Resize([out_enc_level2.shape[2],out_enc_level2.shape[3]])\n inp_img2 = torch_resize2(inp_img)\n out_enc_level2,output_img2,pred2 = self.kdp_level2(out_enc_level2,degra_key,inp_img2,degra_type=de_type)\n\n inp_enc_level3 = self.down2_3(out_enc_level2)\n out_enc_level3 = self.encoder_level3(inp_enc_level3)\n torch_resize3 = Resize([out_enc_level3.shape[2],out_enc_level3.shape[3]])\n inp_img3 = torch_resize3(inp_img)\n out_enc_level3,output_img3,pred3 = self.kdp_level3(out_enc_level3,degra_key,inp_img3,degra_type=de_type)\n\n inp_enc_level4 = 
self.down3_4(out_enc_level3)\n latent = self.latent(inp_enc_level4)\n\n inp_dec_level3 = self.up4_3(latent)\n inp_dec_level3 = torch.cat([inp_dec_level3, out_enc_level3], 1)\n inp_dec_level3 = self.reduce_chan_level3(inp_dec_level3)\n out_dec_level3 = self.decoder_level3(inp_dec_level3)\n\n inp_dec_level2 = self.up3_2(out_dec_level3)\n inp_dec_level2 = torch.cat([inp_dec_level2, out_enc_level2], 1)\n inp_dec_level2 = self.reduce_chan_level2(inp_dec_level2)\n out_dec_level2 = self.decoder_level2(inp_dec_level2)\n\n inp_dec_level1 = self.up2_1(out_dec_level2)\n inp_dec_level1 = torch.cat([inp_dec_level1, out_enc_level1], 1)\n out_dec_level1 = self.decoder_level1(inp_dec_level1)\n\n out_dec_level1 = self.refinement(out_dec_level1)\n out_dec_level1 = self.output(out_dec_level1) + inp_img\n \n if gt is not None:\n gt_img1 = torch_resize1(gt)\n gt_img2 = torch_resize2(gt)\n gt_img3 = torch_resize3(gt)\n output_img = [output_img1,output_img2,output_img3] \n gt_img = [gt_img1,gt_img2,gt_img3] \n loss = np.sum([self.cri_pix(output_img[j],gt_img[j]) for j in range(len(output_img))])\n return [out_dec_level1,loss,pred1,pred2,pred3]\n else:\n return out_dec_level1" } ]
import argparse
import subprocess
import numpy as np
import os
import torch
import torch.nn as nn
import logging
from tqdm import tqdm
from PIL import Image
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
from torchvision.transforms import ToPILImage, Compose, RandomCrop, ToTensor
from utils.metric_util import AverageMeter
from utils.tensor_op import save_img_tensor, save_image_tensor
from utils.util import mkdir, setup_logger
from utils.data_util import crop_HWC_img, random_augmentation, tensor2img
from metrics.psnr_ssim import compute_psnr_ssim, calculate_psnr, calculate_ssim
from models.archs.IDR_restormer_arch import IDR_restormer
6,695
noisy_img, _ = self._add_gaussian_noise(clean_img) clean_img, noisy_img = self.toTensor(clean_img), self.toTensor(noisy_img) return [clean_name], noisy_img, clean_img def __len__(self): return self.num_clean class DerainDehazeDataset(Dataset): def __init__(self, args, task="derain"): super(DerainDehazeDataset, self).__init__() self.ids = [] self.task_idx = 0 self.args = args self.task_dict = {'derain': 0, 'dehaze': 1, 'deblur':2, 'low-light':3, 'UDC_T':4, 'UDC_P':5} self.toTensor = ToTensor() self.set_dataset(task) def _init_input_ids(self): if self.task_idx == 0: self.ids = [] name_list = os.listdir(self.args.derain_path + 'input/') self.ids += [self.args.derain_path + 'input/' + id_ for id_ in name_list] elif self.task_idx == 1: self.ids = [] name_list = os.listdir(self.args.dehaze_path + 'input/') self.ids += [self.args.dehaze_path + 'input/' + id_ for id_ in name_list] elif self.task_idx == 2: self.ids = [] name_list = os.listdir(self.args.deblur_path + 'input/') self.ids += [self.args.deblur_path + 'input/' + id_ for id_ in name_list] elif self.task_idx == 3: self.ids = [] name_list = os.listdir(self.args.low_light_path + 'input/') self.ids += [self.args.low_light_path + 'input/' + id_ for id_ in name_list] elif self.task_idx == 4: self.ids = [] name_list = os.listdir(self.args.udc_T_path + 'input/') self.ids += [self.args.udc_T_path + 'input/' + id_ for id_ in name_list] elif self.task_idx == 5: self.ids = [] name_list = os.listdir(self.args.udc_P_path + 'input/') self.ids += [self.args.udc_P_path + 'input/' + id_ for id_ in name_list] self.length = len(self.ids) def _get_gt_path(self, degraded_name): if self.task_idx == 0: gt_name = degraded_name.replace("input", "target") elif self.task_idx == 1: dir_name = degraded_name.split("input")[0] + 'target/' name = degraded_name.split('/')[-1].split('_')[0] + '.png' gt_name = dir_name + name elif self.task_idx == 2: gt_name = degraded_name.replace("input", "target") elif self.task_idx == 3: gt_name = degraded_name.replace("input", "target") elif self.task_idx == 4: gt_name = degraded_name.replace("input", "target") elif self.task_idx == 5: gt_name = degraded_name.replace("input", "target") return gt_name def set_dataset(self, task): self.task_idx = self.task_dict[task] self._init_input_ids() def _edgeComputation(self,x): x_diffx = np.abs(x[:,1:,:] - x[:,:-1,:]) x_diffy = np.abs(x[1:,:,:] - x[:-1,:,:]) y = np.zeros_like(x) y[:,1:,:] += x_diffx y[:,:-1,:] += x_diffx y[1:,:,:] += x_diffy y[:-1,:,:] += x_diffy y = np.sum(y,2)/3 y /= 4 return y[:,:,None].astype(np.float32) def __getitem__(self, idx): degraded_path = self.ids[idx] clean_path = self._get_gt_path(degraded_path) degraded_img = crop_HWC_img(np.array(Image.open(degraded_path).convert('RGB')), base=32) clean_img = crop_HWC_img(np.array(Image.open(clean_path).convert('RGB')), base=32) clean_img, degraded_img = self.toTensor(clean_img), self.toTensor(degraded_img) degraded_name = degraded_path.split('/')[-1][:-4] return [degraded_name], degraded_img, clean_img def __len__(self): return self.length def test_Denoise(net, dataset, task="CBSD68", sigma=15,save_img=True): logger = logging.getLogger('base') output_path = opt.output_path + 'denoise/' + str(sigma) + '/' # subprocess.check_output(['mkdir', '-p', output_path]) mkdir(output_path) dataset.set_dataset(task) dataset.set_sigma(sigma) testloader = DataLoader(dataset, batch_size=1, pin_memory=True, shuffle=False, num_workers=0) psnr = AverageMeter() ssim = AverageMeter() with torch.no_grad(): for ([clean_name], degrad_patch, 
clean_patch) in tqdm(testloader): degrad_patch, clean_patch = degrad_patch.cuda(), clean_patch.cuda() restored = net(degrad_patch) if type(restored) == list: restored = restored[0]
class DenoiseTestDataset(Dataset): def __init__(self, args, dataset="CBSD68"): super(DenoiseTestDataset, self).__init__() self.args = args self.clean_ids = [] self.sigma = 15 self.dataset_dict = {'CBSD68': 0, 'urban100': 1, 'Kodak24':2} self.set_dataset(dataset) self.toTensor = ToTensor() def _init_clean_ids(self): if self.task_idx == 0: self.clean_ids = [] name_list = os.listdir(self.args.denoise_CBSD68_path) self.clean_ids += [self.args.denoise_CBSD68_path + id_ for id_ in name_list] elif self.task_idx == 1: self.clean_ids = [] name_list = os.listdir(self.args.denoise_urban100_path) self.clean_ids += [self.args.denoise_urban100_path + id_ for id_ in name_list] elif self.task_idx == 2: self.clean_ids = [] name_list = os.listdir(self.args.denoise_Kodak24_path) self.clean_ids += [self.args.denoise_Kodak24_path + id_ for id_ in name_list] self.num_clean = len(self.clean_ids) def set_dataset(self, dataset): self.task_idx = self.dataset_dict[dataset] self._init_clean_ids() def _add_gaussian_noise(self, clean_patch): noise = np.random.randn(*clean_patch.shape) noisy_patch = np.clip(clean_patch + noise * self.sigma, 0, 255).astype(np.uint8) return noisy_patch, clean_patch def _edgeComputation(self,x): x_diffx = np.abs(x[:,1:,:] - x[:,:-1,:]) x_diffy = np.abs(x[1:,:,:] - x[:-1,:,:]) y = np.zeros_like(x) y[:,1:,:] += x_diffx y[:,:-1,:] += x_diffx y[1:,:,:] += x_diffy y[:-1,:,:] += x_diffy y = np.sum(y,2)/3 y /= 4 return y[:,:,None].astype(np.float32) def set_sigma(self, sigma): self.sigma = sigma def __getitem__(self, clean_id): clean_img = crop_HWC_img(np.array(Image.open(self.clean_ids[clean_id]).convert('RGB')), base=32) clean_name = self.clean_ids[clean_id].split("/")[-1].split('.')[0] noisy_img, _ = self._add_gaussian_noise(clean_img) clean_img, noisy_img = self.toTensor(clean_img), self.toTensor(noisy_img) return [clean_name], noisy_img, clean_img def __len__(self): return self.num_clean class DerainDehazeDataset(Dataset): def __init__(self, args, task="derain"): super(DerainDehazeDataset, self).__init__() self.ids = [] self.task_idx = 0 self.args = args self.task_dict = {'derain': 0, 'dehaze': 1, 'deblur':2, 'low-light':3, 'UDC_T':4, 'UDC_P':5} self.toTensor = ToTensor() self.set_dataset(task) def _init_input_ids(self): if self.task_idx == 0: self.ids = [] name_list = os.listdir(self.args.derain_path + 'input/') self.ids += [self.args.derain_path + 'input/' + id_ for id_ in name_list] elif self.task_idx == 1: self.ids = [] name_list = os.listdir(self.args.dehaze_path + 'input/') self.ids += [self.args.dehaze_path + 'input/' + id_ for id_ in name_list] elif self.task_idx == 2: self.ids = [] name_list = os.listdir(self.args.deblur_path + 'input/') self.ids += [self.args.deblur_path + 'input/' + id_ for id_ in name_list] elif self.task_idx == 3: self.ids = [] name_list = os.listdir(self.args.low_light_path + 'input/') self.ids += [self.args.low_light_path + 'input/' + id_ for id_ in name_list] elif self.task_idx == 4: self.ids = [] name_list = os.listdir(self.args.udc_T_path + 'input/') self.ids += [self.args.udc_T_path + 'input/' + id_ for id_ in name_list] elif self.task_idx == 5: self.ids = [] name_list = os.listdir(self.args.udc_P_path + 'input/') self.ids += [self.args.udc_P_path + 'input/' + id_ for id_ in name_list] self.length = len(self.ids) def _get_gt_path(self, degraded_name): if self.task_idx == 0: gt_name = degraded_name.replace("input", "target") elif self.task_idx == 1: dir_name = degraded_name.split("input")[0] + 'target/' name = degraded_name.split('/')[-1].split('_')[0] + 
'.png' gt_name = dir_name + name elif self.task_idx == 2: gt_name = degraded_name.replace("input", "target") elif self.task_idx == 3: gt_name = degraded_name.replace("input", "target") elif self.task_idx == 4: gt_name = degraded_name.replace("input", "target") elif self.task_idx == 5: gt_name = degraded_name.replace("input", "target") return gt_name def set_dataset(self, task): self.task_idx = self.task_dict[task] self._init_input_ids() def _edgeComputation(self,x): x_diffx = np.abs(x[:,1:,:] - x[:,:-1,:]) x_diffy = np.abs(x[1:,:,:] - x[:-1,:,:]) y = np.zeros_like(x) y[:,1:,:] += x_diffx y[:,:-1,:] += x_diffx y[1:,:,:] += x_diffy y[:-1,:,:] += x_diffy y = np.sum(y,2)/3 y /= 4 return y[:,:,None].astype(np.float32) def __getitem__(self, idx): degraded_path = self.ids[idx] clean_path = self._get_gt_path(degraded_path) degraded_img = crop_HWC_img(np.array(Image.open(degraded_path).convert('RGB')), base=32) clean_img = crop_HWC_img(np.array(Image.open(clean_path).convert('RGB')), base=32) clean_img, degraded_img = self.toTensor(clean_img), self.toTensor(degraded_img) degraded_name = degraded_path.split('/')[-1][:-4] return [degraded_name], degraded_img, clean_img def __len__(self): return self.length def test_Denoise(net, dataset, task="CBSD68", sigma=15,save_img=True): logger = logging.getLogger('base') output_path = opt.output_path + 'denoise/' + str(sigma) + '/' # subprocess.check_output(['mkdir', '-p', output_path]) mkdir(output_path) dataset.set_dataset(task) dataset.set_sigma(sigma) testloader = DataLoader(dataset, batch_size=1, pin_memory=True, shuffle=False, num_workers=0) psnr = AverageMeter() ssim = AverageMeter() with torch.no_grad(): for ([clean_name], degrad_patch, clean_patch) in tqdm(testloader): degrad_patch, clean_patch = degrad_patch.cuda(), clean_patch.cuda() restored = net(degrad_patch) if type(restored) == list: restored = restored[0]
temp_psnr, temp_ssim, N = compute_psnr_ssim(restored, clean_patch)
8
2023-12-07 10:58:34+00:00
8k
TACJu/Compositor
Compositor_Mask2Former/mask2former/maskformer_model.py
[ { "identifier": "SetCriterion", "path": "Compositor_Mask2Former/mask2former/modeling/criterion.py", "snippet": "class SetCriterion(nn.Module):\n \"\"\"This class computes the loss for DETR.\n The process happens in two steps:\n 1) we compute hungarian assignment between ground truth boxes and the outputs of the model\n 2) we supervise each pair of matched ground-truth / prediction (supervise class and box)\n \"\"\"\n\n def __init__(self, num_classes, matcher, weight_dict, eos_coef, losses,\n num_points, oversample_ratio, importance_sample_ratio):\n \"\"\"Create the criterion.\n Parameters:\n num_classes: number of object categories, omitting the special no-object category\n matcher: module able to compute a matching between targets and proposals\n weight_dict: dict containing as key the names of the losses and as values their relative weight.\n eos_coef: relative classification weight applied to the no-object category\n losses: list of all the losses to be applied. See get_loss for list of available losses.\n \"\"\"\n super().__init__()\n self.num_classes = num_classes\n self.matcher = matcher\n self.weight_dict = weight_dict\n self.eos_coef = eos_coef\n self.losses = losses\n empty_weight = torch.ones(self.num_classes + 1)\n empty_weight[-1] = self.eos_coef\n self.register_buffer(\"empty_weight\", empty_weight)\n\n # pointwise mask loss parameters\n self.num_points = num_points\n self.oversample_ratio = oversample_ratio\n self.importance_sample_ratio = importance_sample_ratio\n\n def loss_labels(self, outputs, targets, indices, num_masks):\n \"\"\"Classification loss (NLL)\n targets dicts must contain the key \"labels\" containing a tensor of dim [nb_target_boxes]\n \"\"\"\n assert \"pred_logits\" in outputs\n src_logits = outputs[\"pred_logits\"].float()\n\n idx = self._get_src_permutation_idx(indices)\n target_classes_o = torch.cat([t[\"labels\"][J] for t, (_, J) in zip(targets, indices)])\n target_classes = torch.full(\n src_logits.shape[:2], self.num_classes, dtype=torch.int64, device=src_logits.device\n )\n target_classes[idx] = target_classes_o\n\n loss_ce = F.cross_entropy(src_logits.transpose(1, 2), target_classes, self.empty_weight)\n losses = {\"loss_ce\": loss_ce}\n return losses\n \n def loss_masks(self, outputs, targets, indices, num_masks):\n \"\"\"Compute the losses related to the masks: the focal loss and the dice loss.\n targets dicts must contain the key \"masks\" containing a tensor of dim [nb_target_boxes, h, w]\n \"\"\"\n assert \"pred_masks\" in outputs\n\n src_idx = self._get_src_permutation_idx(indices)\n tgt_idx = self._get_tgt_permutation_idx(indices)\n src_masks = outputs[\"pred_masks\"]\n src_masks = src_masks[src_idx]\n masks = [t[\"masks\"] for t in targets]\n # TODO use valid to mask invalid areas due to padding in loss\n target_masks, valid = nested_tensor_from_tensor_list(masks).decompose()\n target_masks = target_masks.to(src_masks)\n target_masks = target_masks[tgt_idx]\n\n # No need to upsample predictions as we are using normalized coordinates :)\n # N x 1 x H x W\n src_masks = src_masks[:, None]\n target_masks = target_masks[:, None]\n\n with torch.no_grad():\n # sample point_coords\n point_coords = get_uncertain_point_coords_with_randomness(\n src_masks,\n lambda logits: calculate_uncertainty(logits),\n self.num_points,\n self.oversample_ratio,\n self.importance_sample_ratio,\n )\n # get gt labels\n point_labels = point_sample(\n target_masks,\n point_coords,\n align_corners=False,\n ).squeeze(1)\n\n point_logits = point_sample(\n src_masks,\n 
point_coords,\n align_corners=False,\n ).squeeze(1)\n\n losses = {\n \"loss_mask\": sigmoid_ce_loss_jit(point_logits, point_labels, num_masks),\n \"loss_dice\": dice_loss_jit(point_logits, point_labels, num_masks),\n }\n\n del src_masks\n del target_masks\n return losses\n\n def _get_src_permutation_idx(self, indices):\n # permute predictions following indices\n batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)])\n src_idx = torch.cat([src for (src, _) in indices])\n return batch_idx, src_idx\n\n def _get_tgt_permutation_idx(self, indices):\n # permute targets following indices\n batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)])\n tgt_idx = torch.cat([tgt for (_, tgt) in indices])\n return batch_idx, tgt_idx\n\n def get_loss(self, loss, outputs, targets, indices, num_masks):\n loss_map = {\n 'labels': self.loss_labels,\n 'masks': self.loss_masks,\n }\n assert loss in loss_map, f\"do you really want to compute {loss} loss?\"\n return loss_map[loss](outputs, targets, indices, num_masks)\n\n def forward(self, outputs, targets):\n \"\"\"This performs the loss computation.\n Parameters:\n outputs: dict of tensors, see the output specification of the model for the format\n targets: list of dicts, such that len(targets) == batch_size.\n The expected keys in each dict depends on the losses applied, see each loss' doc\n \"\"\"\n outputs_without_aux = {k: v for k, v in outputs.items() if k != \"aux_outputs\"}\n\n # Retrieve the matching between the outputs of the last layer and the targets\n indices = self.matcher(outputs_without_aux, targets)\n\n # Compute the average number of target boxes accross all nodes, for normalization purposes\n num_masks = sum(len(t[\"labels\"]) for t in targets)\n num_masks = torch.as_tensor(\n [num_masks], dtype=torch.float, device=next(iter(outputs.values())).device\n )\n if is_dist_avail_and_initialized():\n torch.distributed.all_reduce(num_masks)\n num_masks = torch.clamp(num_masks / get_world_size(), min=1).item()\n\n # Compute all the requested losses\n losses = {}\n for loss in self.losses:\n losses.update(self.get_loss(loss, outputs, targets, indices, num_masks))\n\n # In case of auxiliary losses, we repeat this process with the output of each intermediate layer.\n if \"aux_outputs\" in outputs:\n for i, aux_outputs in enumerate(outputs[\"aux_outputs\"]):\n indices = self.matcher(aux_outputs, targets)\n for loss in self.losses:\n l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_masks)\n l_dict = {k + f\"_{i}\": v for k, v in l_dict.items()}\n losses.update(l_dict)\n\n return losses\n\n def __repr__(self):\n head = \"Criterion \" + self.__class__.__name__\n body = [\n \"matcher: {}\".format(self.matcher.__repr__(_repr_indent=8)),\n \"losses: {}\".format(self.losses),\n \"weight_dict: {}\".format(self.weight_dict),\n \"num_classes: {}\".format(self.num_classes),\n \"eos_coef: {}\".format(self.eos_coef),\n \"num_points: {}\".format(self.num_points),\n \"oversample_ratio: {}\".format(self.oversample_ratio),\n \"importance_sample_ratio: {}\".format(self.importance_sample_ratio),\n ]\n _repr_indent = 4\n lines = [head] + [\" \" * _repr_indent + line for line in body]\n return \"\\n\".join(lines)" }, { "identifier": "HungarianMatcher", "path": "Compositor_Mask2Former/mask2former/modeling/matcher.py", "snippet": "class HungarianMatcher(nn.Module):\n \"\"\"This class computes an assignment between the targets and the predictions of the network\n\n For efficiency reasons, the targets 
don't include the no_object. Because of this, in general,\n there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions,\n while the others are un-matched (and thus treated as non-objects).\n \"\"\"\n\n def __init__(self, cost_class: float = 1, cost_mask: float = 1, cost_dice: float = 1, num_points: int = 0):\n \"\"\"Creates the matcher\n\n Params:\n cost_class: This is the relative weight of the classification error in the matching cost\n cost_mask: This is the relative weight of the focal loss of the binary mask in the matching cost\n cost_dice: This is the relative weight of the dice loss of the binary mask in the matching cost\n \"\"\"\n super().__init__()\n self.cost_class = cost_class\n self.cost_mask = cost_mask\n self.cost_dice = cost_dice\n\n assert cost_class != 0 or cost_mask != 0 or cost_dice != 0, \"all costs cant be 0\"\n\n self.num_points = num_points\n\n @torch.no_grad()\n def memory_efficient_forward(self, outputs, targets):\n \"\"\"More memory-friendly matching\"\"\"\n bs, num_queries = outputs[\"pred_logits\"].shape[:2]\n\n indices = []\n\n # Iterate through batch size\n for b in range(bs):\n\n out_prob = outputs[\"pred_logits\"][b].softmax(-1) # [num_queries, num_classes]\n tgt_ids = targets[b][\"labels\"]\n\n # Compute the classification cost. Contrary to the loss, we don't use the NLL,\n # but approximate it in 1 - proba[target class].\n # The 1 is a constant that doesn't change the matching, it can be ommitted.\n cost_class = -out_prob[:, tgt_ids]\n\n out_mask = outputs[\"pred_masks\"][b] # [num_queries, H_pred, W_pred]\n # gt masks are already padded when preparing target\n tgt_mask = targets[b][\"masks\"].to(out_mask)\n\n out_mask = out_mask[:, None]\n tgt_mask = tgt_mask[:, None]\n # all masks share the same set of points for efficient matching!\n point_coords = torch.rand(1, self.num_points, 2, device=out_mask.device)\n # get gt labels\n tgt_mask = point_sample(\n tgt_mask,\n point_coords.repeat(tgt_mask.shape[0], 1, 1),\n align_corners=False,\n ).squeeze(1)\n\n out_mask = point_sample(\n out_mask,\n point_coords.repeat(out_mask.shape[0], 1, 1),\n align_corners=False,\n ).squeeze(1)\n\n with autocast(enabled=False):\n out_mask = out_mask.float()\n tgt_mask = tgt_mask.float()\n # Compute the focal loss between masks\n cost_mask = batch_sigmoid_ce_loss_jit(out_mask, tgt_mask)\n\n # Compute the dice loss betwen masks\n cost_dice = batch_dice_loss_jit(out_mask, tgt_mask)\n \n # Final cost matrix\n C = (\n self.cost_mask * cost_mask\n + self.cost_class * cost_class\n + self.cost_dice * cost_dice\n )\n C = C.reshape(num_queries, -1).cpu()\n\n indices.append(linear_sum_assignment(C))\n\n return [\n (torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64))\n for i, j in indices\n ]\n\n @torch.no_grad()\n def forward(self, outputs, targets):\n \"\"\"Performs the matching\n\n Params:\n outputs: This is a dict that contains at least these entries:\n \"pred_logits\": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits\n \"pred_masks\": Tensor of dim [batch_size, num_queries, H_pred, W_pred] with the predicted masks\n\n targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing:\n \"labels\": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth\n objects in the target) containing the class labels\n \"masks\": Tensor of dim [num_target_boxes, H_gt, W_gt] containing the target masks\n\n Returns:\n A list of 
size batch_size, containing tuples of (index_i, index_j) where:\n - index_i is the indices of the selected predictions (in order)\n - index_j is the indices of the corresponding selected targets (in order)\n For each batch element, it holds:\n len(index_i) = len(index_j) = min(num_queries, num_target_boxes)\n \"\"\"\n return self.memory_efficient_forward(outputs, targets)\n\n def __repr__(self, _repr_indent=4):\n head = \"Matcher \" + self.__class__.__name__\n body = [\n \"cost_class: {}\".format(self.cost_class),\n \"cost_mask: {}\".format(self.cost_mask),\n \"cost_dice: {}\".format(self.cost_dice),\n ]\n lines = [head] + [\" \" * _repr_indent + line for line in body]\n return \"\\n\".join(lines)" } ]
from typing import Tuple
from torch import nn
from torch.nn import functional as F
from detectron2.config import configurable
from detectron2.data import MetadataCatalog
from detectron2.modeling import META_ARCH_REGISTRY, build_backbone, build_sem_seg_head
from detectron2.modeling.backbone import Backbone
from detectron2.modeling.postprocessing import sem_seg_postprocess
from detectron2.structures import Boxes, ImageList, Instances, BitMasks
from detectron2.utils.memory import retry_if_cuda_oom
from .modeling.criterion import SetCriterion
from .modeling.matcher import HungarianMatcher
import torch
4,136
# Copyright (c) Facebook, Inc. and its affiliates. @META_ARCH_REGISTRY.register() class MaskFormer(nn.Module): """ Main class for mask classification semantic segmentation architectures. """ @configurable def __init__( self, *, backbone: Backbone, sem_seg_head: nn.Module, criterion: nn.Module, num_queries: int, object_mask_threshold: float, overlap_threshold: float, metadata, size_divisibility: int, sem_seg_postprocess_before_inference: bool, pixel_mean: Tuple[float], pixel_std: Tuple[float], # inference semantic_on: bool, panoptic_on: bool, instance_on: bool, test_topk_per_image: int, ): """ Args: backbone: a backbone module, must follow detectron2's backbone interface sem_seg_head: a module that predicts semantic segmentation from backbone features criterion: a module that defines the loss num_queries: int, number of queries object_mask_threshold: float, threshold to filter query based on classification score for panoptic segmentation inference overlap_threshold: overlap threshold used in general inference for panoptic segmentation metadata: dataset meta, get `thing` and `stuff` category names for panoptic segmentation inference size_divisibility: Some backbones require the input height and width to be divisible by a specific integer. We can use this to override such requirement. sem_seg_postprocess_before_inference: whether to resize the prediction back to original input size before semantic segmentation inference or after. For high-resolution dataset like Mapillary, resizing predictions before inference will cause OOM error. pixel_mean, pixel_std: list or tuple with #channels element, representing the per-channel mean and std to be used to normalize the input image semantic_on: bool, whether to output semantic segmentation prediction instance_on: bool, whether to output instance segmentation prediction panoptic_on: bool, whether to output panoptic segmentation prediction test_topk_per_image: int, instance segmentation parameter, keep topk instances per image """ super().__init__() self.backbone = backbone self.sem_seg_head = sem_seg_head self.criterion = criterion self.num_queries = num_queries self.overlap_threshold = overlap_threshold self.object_mask_threshold = object_mask_threshold self.metadata = metadata if size_divisibility < 0: # use backbone size_divisibility if not set size_divisibility = self.backbone.size_divisibility self.size_divisibility = size_divisibility self.sem_seg_postprocess_before_inference = sem_seg_postprocess_before_inference self.register_buffer("pixel_mean", torch.Tensor(pixel_mean).view(-1, 1, 1), False) self.register_buffer("pixel_std", torch.Tensor(pixel_std).view(-1, 1, 1), False) # additional args self.semantic_on = semantic_on self.instance_on = instance_on self.panoptic_on = panoptic_on self.test_topk_per_image = test_topk_per_image if not self.semantic_on: assert self.sem_seg_postprocess_before_inference @classmethod def from_config(cls, cfg): backbone = build_backbone(cfg) sem_seg_head = build_sem_seg_head(cfg, backbone.output_shape()) # Loss parameters: deep_supervision = cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION no_object_weight = cfg.MODEL.MASK_FORMER.NO_OBJECT_WEIGHT # loss weights class_weight = cfg.MODEL.MASK_FORMER.CLASS_WEIGHT dice_weight = cfg.MODEL.MASK_FORMER.DICE_WEIGHT mask_weight = cfg.MODEL.MASK_FORMER.MASK_WEIGHT # building criterion
# Copyright (c) Facebook, Inc. and its affiliates. @META_ARCH_REGISTRY.register() class MaskFormer(nn.Module): """ Main class for mask classification semantic segmentation architectures. """ @configurable def __init__( self, *, backbone: Backbone, sem_seg_head: nn.Module, criterion: nn.Module, num_queries: int, object_mask_threshold: float, overlap_threshold: float, metadata, size_divisibility: int, sem_seg_postprocess_before_inference: bool, pixel_mean: Tuple[float], pixel_std: Tuple[float], # inference semantic_on: bool, panoptic_on: bool, instance_on: bool, test_topk_per_image: int, ): """ Args: backbone: a backbone module, must follow detectron2's backbone interface sem_seg_head: a module that predicts semantic segmentation from backbone features criterion: a module that defines the loss num_queries: int, number of queries object_mask_threshold: float, threshold to filter query based on classification score for panoptic segmentation inference overlap_threshold: overlap threshold used in general inference for panoptic segmentation metadata: dataset meta, get `thing` and `stuff` category names for panoptic segmentation inference size_divisibility: Some backbones require the input height and width to be divisible by a specific integer. We can use this to override such requirement. sem_seg_postprocess_before_inference: whether to resize the prediction back to original input size before semantic segmentation inference or after. For high-resolution dataset like Mapillary, resizing predictions before inference will cause OOM error. pixel_mean, pixel_std: list or tuple with #channels element, representing the per-channel mean and std to be used to normalize the input image semantic_on: bool, whether to output semantic segmentation prediction instance_on: bool, whether to output instance segmentation prediction panoptic_on: bool, whether to output panoptic segmentation prediction test_topk_per_image: int, instance segmentation parameter, keep topk instances per image """ super().__init__() self.backbone = backbone self.sem_seg_head = sem_seg_head self.criterion = criterion self.num_queries = num_queries self.overlap_threshold = overlap_threshold self.object_mask_threshold = object_mask_threshold self.metadata = metadata if size_divisibility < 0: # use backbone size_divisibility if not set size_divisibility = self.backbone.size_divisibility self.size_divisibility = size_divisibility self.sem_seg_postprocess_before_inference = sem_seg_postprocess_before_inference self.register_buffer("pixel_mean", torch.Tensor(pixel_mean).view(-1, 1, 1), False) self.register_buffer("pixel_std", torch.Tensor(pixel_std).view(-1, 1, 1), False) # additional args self.semantic_on = semantic_on self.instance_on = instance_on self.panoptic_on = panoptic_on self.test_topk_per_image = test_topk_per_image if not self.semantic_on: assert self.sem_seg_postprocess_before_inference @classmethod def from_config(cls, cfg): backbone = build_backbone(cfg) sem_seg_head = build_sem_seg_head(cfg, backbone.output_shape()) # Loss parameters: deep_supervision = cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION no_object_weight = cfg.MODEL.MASK_FORMER.NO_OBJECT_WEIGHT # loss weights class_weight = cfg.MODEL.MASK_FORMER.CLASS_WEIGHT dice_weight = cfg.MODEL.MASK_FORMER.DICE_WEIGHT mask_weight = cfg.MODEL.MASK_FORMER.MASK_WEIGHT # building criterion
matcher = HungarianMatcher(
1
2023-12-12 11:49:28+00:00
8k
turbopuffer/turbopuffer-python
turbopuffer/namespace.py
[ { "identifier": "Cursor", "path": "turbopuffer/vectors.py", "snippet": "class Cursor(str):\n pass" }, { "identifier": "VectorResult", "path": "turbopuffer/vectors.py", "snippet": "class VectorResult:\n \"\"\"\n The VectorResult type represents a set of vectors that are the result of a query.\n\n A VectorResult can be treated as either a lazy iterator or a list by the user.\n Reading the length of the result will internally buffer the full result.\n \"\"\"\n\n namespace: Optional['Namespace'] = None\n data: Optional[SET_DATA] = None\n index: int = -1\n offset: int = 0\n next_cursor: Optional[Cursor] = None\n\n def __init__(self, initial_data: Optional[DATA] = None, namespace: Optional['Namespace'] = None, next_cursor: Optional[Cursor] = None):\n self.namespace = namespace\n self.index = -1\n self.offset = 0\n self.next_cursor = next_cursor\n\n self.data = VectorResult.load_data(initial_data)\n\n def load_data(initial_data: DATA) -> SET_DATA:\n if initial_data:\n if isinstance(initial_data, list):\n if isinstance(initial_data[0], dict):\n return [VectorRow.from_dict(row) for row in initial_data]\n elif isinstance(initial_data[0], VectorRow):\n return initial_data\n else:\n raise ValueError(f'Unsupported list data type: {type(initial_data[0])}')\n elif isinstance(initial_data, dict):\n return VectorColumns.from_dict(initial_data)\n elif isinstance(initial_data, VectorColumns):\n return initial_data\n elif isinstance(initial_data, Iterable):\n raise ValueError('VectorResult from Iterable not yet supported.')\n else:\n raise ValueError(f'Unsupported data type: {type(initial_data)}')\n\n def __str__(self) -> str:\n if not self.next_cursor and self.offset == 0:\n return str(self.data)\n else:\n return (\"VectorResult(\"\n f\"namespace='{self.namespace.name}', \"\n f\"offset={self.offset}, \"\n f\"next_cursor='{self.next_cursor}', \"\n f\"data={self.data})\")\n\n def __len__(self) -> int:\n assert self.offset == 0, \"Can't call len(VectorResult) after iterating\"\n assert self.index == -1, \"Can't call len(VectorResult) after iterating\"\n if not self.next_cursor:\n if self.data is not None:\n return len(self.data)\n return 0\n else:\n it = iter(self)\n self.data = [next for next in it]\n self.offset = 0\n self.index = -1\n self.next_cursor = None\n return len(self.data)\n\n def __getitem__(self, index) -> VectorRow:\n if index >= len(self.data) and self.next_cursor:\n it = iter(self)\n self.data = [next for next in it]\n self.offset = 0\n self.index = -1\n self.next_cursor = None\n return self.data[index]\n\n def __iter__(self) -> 'VectorResult':\n assert self.offset == 0, \"Can't iterate over VectorResult multiple times\"\n return VectorResult(self.data, self.namespace, self.next_cursor)\n\n def __next__(self):\n if self.data is not None and self.index + 1 < len(self.data):\n self.index += 1\n return self.data[self.index]\n elif self.next_cursor is None:\n raise StopIteration\n else:\n response = self.namespace.backend.make_api_request(\n 'vectors',\n self.namespace.name,\n query={'cursor': self.next_cursor}\n )\n self.offset += len(self.data)\n self.index = -1\n self.next_cursor = response.pop('next_cursor', None)\n self.data = VectorResult.load_data(response)\n return self.__next__()" }, { "identifier": "VectorColumns", "path": "turbopuffer/vectors.py", "snippet": "class VectorColumns:\n \"\"\"\n The VectorColumns type represents a set of vectors stored in a column-oriented layout with their attributes.\n\n If the VectorColumns is a response from a query, it may also have a set of distance 
weightings for each vector.\n \"\"\"\n\n ids: Union[List[int], List[str]]\n vectors: List[Optional[List[float]]]\n attributes: Optional[Dict[str, List[Optional[str]]]] = None\n\n distances: Optional[List[float]] = None\n\n def from_dict(source: dict) -> 'VectorColumns':\n return VectorColumns(\n ids=source.get('ids'),\n vectors=source.get('vectors'),\n attributes=source.get('attributes'),\n distances=source.get('distances'),\n )\n\n def __post_init__(self):\n if 'numpy' in sys.modules and isinstance(self.ids, sys.modules['numpy'].ndarray):\n if self.ids.ndim != 1:\n raise ValueError(f'VectorColumns.ids must a 1d-array, got {self.ids.ndim} dimensions')\n elif not isinstance(self.ids, list):\n raise ValueError('VectorColumns.ids must be a list, got:', type(self.ids))\n if 'numpy' in sys.modules and isinstance(self.vectors, sys.modules['numpy'].ndarray):\n if self.vectors.ndim != 2:\n raise ValueError(f'VectorColumns.ids must a 2d-array, got {self.vectors.ndim} dimensions')\n elif not isinstance(self.vectors, list):\n raise ValueError('VectorColumns.vectors must be a list, got:', type(self.vectors))\n if len(self.ids) != len(self.vectors):\n raise ValueError('VectorColumns.ids and VectorColumns.vectors must be the same length')\n if self.attributes is not None:\n if not isinstance(self.attributes, dict):\n raise ValueError('VectorColumns.attributes must be a dict, got:', type(self.attributes))\n for key, values in self.attributes.items():\n if not isinstance(values, list):\n raise ValueError(f'VectorColumns.attributes[{key}] must be a list, got:', type(values))\n if len(values) != len(self.ids):\n raise ValueError(f'VectorColumns.attributes[{key}] must be the same length as VectorColumns.ids')\n if self.distances is not None and len(self.distances) != len(self.ids):\n raise ValueError('VectorColumns.distances must be the same length as VectorColumns.ids')\n\n def __str__(self) -> str:\n fields = [\n f'ids={self.ids}',\n f'vectors={self.vectors}',\n ]\n if self.attributes:\n fields.append(f'attributes={self.attributes}')\n if self.distances:\n fields.append(f'distances={self.distances}')\n return f\"VectorColumns({', '.join(fields)})\"\n\n def __len__(self) -> int:\n return len(self.ids)\n\n def __getitem__(self, index) -> VectorRow:\n # This functions as the main mechanism for converting Columns to Rows\n row = VectorRow(self.ids[index], self.vectors[index], dist=(self.distances and self.distances[index]))\n if self.attributes:\n row.attributes = dict()\n for key, values in self.attributes.items():\n if values and len(values) > index:\n if values[index] is not None:\n row.attributes[key] = values[index]\n return row\n\n def __iadd__(self, other) -> 'VectorColumns':\n return self.append(other)\n\n @overload\n def append(self, other: VectorRow) -> 'VectorColumns':\n ...\n\n @overload\n def append(self, other: List[VectorRow]) -> 'VectorColumns':\n ...\n\n @overload\n def append(self, other: 'VectorColumns') -> 'VectorColumns':\n ...\n\n def append(self, other) -> 'VectorColumns':\n old_len = len(self.ids)\n if isinstance(other, VectorRow):\n self.ids.append(other.id)\n self.vectors.append(other.vector)\n if other.dist is not None:\n self.distances.append(other.dist)\n new_len = len(self.ids)\n if other.attributes:\n if not self.attributes:\n self.attributes = dict()\n for k, v in other.attributes.items():\n attrs = self.attributes.setdefault(k, [None]*old_len)\n attrs.append(v)\n if self.attributes:\n for v in self.attributes.values():\n if len(v) < new_len:\n v.append(None)\n elif 
isinstance(other, VectorColumns):\n self.ids.extend(other.ids)\n self.vectors.extend(other.vectors)\n self.distances.extend(other.distances)\n new_len = len(self.ids)\n if other.attributes:\n if not self.attributes:\n self.attributes = dict()\n for k, v in other.attributes.items():\n attrs = self.attributes.setdefault(k, [None]*old_len)\n attrs.extend(v)\n if self.attributes:\n for v in self.attributes.values():\n if len(v) < new_len:\n v.extend([None] * (new_len-len(v)))\n return self\n elif isinstance(other, list):\n if len(other) == 0:\n return self\n if isinstance(other[0], VectorRow):\n self.ids.extend(row.id for row in other)\n self.vectors.extend(row.vector for row in other)\n self.distances.extend(row.dist for row in other)\n new_len = len(self.ids)\n if not self.attributes:\n self.attributes = dict()\n for i, row in enumerate(other):\n if row.attributes:\n for k, v in row.attributes.items():\n attrs = self.attributes.setdefault(k, [None]*new_len)\n attrs[old_len + i] = v\n for v in self.attributes.values():\n if len(v) < new_len:\n v.extend([None] * (new_len-len(v)))\n return self\n else:\n raise ValueError('VectorColumns.append unsupported list type:', type(other[0]))\n else:\n raise ValueError('VectorColumns.append unsupported type:', type(other))\n\n def from_rows(row_data: Union[VectorRow, Iterable[VectorRow]]) -> 'VectorColumns':\n ids = []\n vectors = []\n attributes = {}\n distances = []\n if isinstance(row_data, VectorRow):\n ids = [row_data.id]\n vectors = [row_data.vector]\n distances = [row_data.dist]\n if row_data.attributes:\n for k, v in row_data.attributes.items():\n attributes[k] = [v]\n return VectorColumns(ids=ids, vectors=vectors, attributes=attributes, distances=distances)\n elif isinstance(row_data, list):\n col_count = len(row_data)\n for i, row in enumerate(row_data):\n if isinstance(row, dict):\n parsed_row = VectorRow.from_dict(row)\n elif isinstance(row, VectorRow):\n parsed_row = row\n else:\n raise ValueError(f'Unsupported row data type: {type(row)}')\n\n ids += [parsed_row.id]\n vectors += [parsed_row.vector]\n distances += [parsed_row.dist]\n if parsed_row.attributes:\n for k, v in parsed_row.attributes.items():\n attrs = attributes.setdefault(k, [None]*col_count)\n attrs[i] = v\n elif isinstance(row_data, Iterable):\n raise ValueError('VectorColumns from Iterable not yet supported.')\n else:\n raise ValueError(f'Unsupported row data type: {type(row_data)}')\n return VectorColumns(ids=ids, vectors=vectors, attributes=attributes, distances=distances)" }, { "identifier": "VectorRow", "path": "turbopuffer/vectors.py", "snippet": "class VectorRow:\n \"\"\"\n The VectorRow type represents a single vector ID, along with its vector values and attributes.\n\n If the VectorRow is a response from a query, it may also have a distance weighting.\n \"\"\"\n\n id: Union[int, str]\n vector: Optional[List[float]] = None\n attributes: Optional[Dict[str, Optional[str]]] = None\n\n dist: Optional[float] = None\n\n def from_dict(source: dict) -> 'VectorRow':\n return VectorRow(\n id=source.get('id'),\n vector=source.get('vector'),\n attributes=source.get('attributes'),\n dist=source.get('dist'),\n )\n\n def __post_init__(self):\n if not isinstance(self.id, int) and not isinstance(self.id, str):\n raise ValueError('VectorRow.id must be an int or str, got:', type(self.id))\n if self.vector is not None:\n if 'numpy' in sys.modules and isinstance(self.vector, sys.modules['numpy'].ndarray):\n if self.vector.ndim != 1:\n raise ValueError(f'VectorRow.vector must a 1d-array, got 
{self.vector.ndim} dimensions')\n elif not isinstance(self.vector, list):\n raise ValueError('VectorRow.vector must be a list, got:', type(self.vector))\n if self.attributes is not None and not isinstance(self.attributes, dict):\n raise ValueError('VectorRow.attributes must be a dict, got:', type(self.attributes))\n\n def __str__(self) -> str:\n fields = []\n if isinstance(self.id, int):\n fields.append(f'id={self.id}')\n else:\n fields.append(f\"id='{self.id}'\")\n fields.append(f'vector={self.vector}')\n if self.attributes:\n fields.append(f'attributes={self.attributes}')\n if self.dist is not None:\n fields.append(f'dist={self.dist}')\n return f\"VectorRow({', '.join(fields)})\"" }, { "identifier": "batch_iter", "path": "turbopuffer/vectors.py", "snippet": "def batch_iter(iterable, n):\n it = iter(iterable)\n while True:\n batch = list(islice(it, n))\n if not batch:\n return\n yield batch" }, { "identifier": "Backend", "path": "turbopuffer/backend.py", "snippet": "class Backend:\n api_key: str\n api_base_url: str\n session: requests.Session\n\n def __init__(self, api_key: Optional[str] = None):\n self.api_key = find_api_key(api_key)\n self.api_base_url = tpuf.api_base_url\n self.session = requests.Session()\n self.session.headers.update({\n 'Authorization': f'Bearer {self.api_key}',\n 'User-Agent': f'tpuf-python/{tpuf.VERSION} {requests.utils.default_headers()[\"User-Agent\"]}',\n })\n\n def make_api_request(self,\n *args: List[str],\n method: Optional[str] = None,\n query: Optional[dict] = None,\n payload: Optional[dict] = None) -> dict:\n start = time.monotonic()\n if method is None and payload is not None:\n method = 'POST'\n request = requests.Request(method or 'GET', self.api_base_url + '/' + '/'.join(args))\n\n if query is not None:\n request.params = query\n\n if payload is not None:\n # before = time.monotonic()\n if isinstance(payload, dict):\n # before = time.monotonic()\n json_payload = tpuf.dump_json_bytes(payload)\n # print('Json time:', time.monotonic() - before)\n else:\n raise ValueError(f'Unsupported POST payload type: {type(payload)}')\n\n gzip_payload = gzip.compress(json_payload, compresslevel=1)\n # json_mebibytes = len(json_payload) / 1024 / 1024\n # gzip_mebibytes = len(gzip_payload) / 1024 / 1024\n # print(f'Gzip time ({json_mebibytes} MiB json / {gzip_mebibytes} MiB gzip):', time.monotonic() - before)\n\n request.headers.update({\n 'Content-Type': 'application/json',\n 'Content-Encoding': 'gzip',\n })\n request.data = gzip_payload\n\n prepared = self.session.prepare_request(request)\n\n retry_attempts = 0\n while retry_attempts < 3:\n # before = time.monotonic()\n try:\n # print(f'Sending request:', prepared.path_url, prepared.headers)\n response = self.session.send(prepared, allow_redirects=False)\n # print(f'Request time (HTTP {response.status_code}):', time.monotonic() - before)\n\n if response.status_code > 500:\n response.raise_for_status()\n\n content_type = response.headers.get('Content-Type', 'text/plain')\n if content_type == 'application/json':\n try:\n content = response.json()\n except json.JSONDecodeError as err:\n raise APIError(response.status_code, traceback.format_exception_only(err), response.text)\n\n if response.ok:\n # print(\"Total request time:\", time.monotonic() - start)\n return content\n else:\n raise APIError(response.status_code, content.get('status', 'error'), content.get('error', ''))\n else:\n raise APIError(response.status_code, 'Server returned non-JSON response', response.text)\n except requests.HTTPError:\n 
print(traceback.format_exc())\n print(\"retrying...\")\n retry_attempts += 1\n time.sleep(2)\n print(\"Total request time (failed):\", time.monotonic() - start)\n raise TurbopufferError('Failed after 3 retries')" }, { "identifier": "VectorQuery", "path": "turbopuffer/query.py", "snippet": "class FilterMatch(Enum):\nclass VectorQuery:\n EQ = 'Eq'\n NOT_EQ = 'NotEq'\n IN = 'In'\n GLOB = 'Glob'\n NOT_GLOB = 'NotGlob'\n def from_dict(source: dict) -> 'VectorQuery':\n def __post_init__(self):" } ]
import sys
import turbopuffer as tpuf
from turbopuffer.vectors import Cursor, VectorResult, VectorColumns, VectorRow, batch_iter
from turbopuffer.backend import Backend
from turbopuffer.query import VectorQuery, FilterTuple
from typing import Dict, List, Optional, Iterable, Union, overload
5,770
def upsert(self, data: Union[Iterable[dict], Iterable[VectorRow]]) -> None: """ Creates or updates a multiple vectors provided as a list or iterator. If this call succeeds, data is guaranteed to be durably written to object storage. Upserting a vector will overwrite any existing vector with the same ID. """ ... @overload def upsert(self, data: VectorResult) -> None: """ Creates or updates multiple vectors. If this call succeeds, data is guaranteed to be durably written to object storage. Upserting a vector will overwrite any existing vector with the same ID. """ ... def upsert(self, data=None, ids=None, vectors=None, attributes=None) -> None: if data is None: if ids is not None and vectors is not None: return self.upsert(VectorColumns(ids=ids, vectors=vectors, attributes=attributes)) else: raise ValueError('upsert() requires both ids= and vectors= be set.') elif isinstance(data, VectorColumns): # "if None in data.vectors:" is not supported because data.vectors might be a list of np.ndarray # None == pd.ndarray is an ambiguous comparison in this case. for vec in data.vectors: if vec is None: raise ValueError('upsert() call would result in a vector deletion, use Namespace.delete([ids...]) instead.') response = self.backend.make_api_request('vectors', self.name, payload=data.__dict__) elif isinstance(data, VectorRow): raise ValueError('upsert() should be called on a list of vectors, got single vector.') elif isinstance(data, list): if isinstance(data[0], dict): return self.upsert(VectorColumns.from_rows(data)) elif isinstance(data[0], VectorRow): return self.upsert(VectorColumns.from_rows(data)) elif isinstance(data[0], VectorColumns): for columns in data: self.upsert(columns) return else: raise ValueError(f'Unsupported list data type: {type(data[0])}') elif isinstance(data, dict): if 'id' in data: raise ValueError('upsert() should be called on a list of vectors, got single vector.') elif 'ids' in data: return self.upsert(VectorColumns.from_dict(data)) else: raise ValueError('Provided dict is missing ids.') elif 'pandas' in sys.modules and isinstance(data, sys.modules['pandas'].DataFrame): if 'id' not in data.keys(): raise ValueError('Provided pd.DataFrame is missing an id column.') if 'vector' not in data.keys(): raise ValueError('Provided pd.DataFrame is missing a vector column.') # start = time.monotonic() for i in range(0, len(data), tpuf.upsert_batch_size): batch = data[i:i+tpuf.upsert_batch_size] attributes = dict() for key, values in batch.items(): if key != 'id' and key != 'vector': attributes[key] = values.tolist() columns = tpuf.VectorColumns( ids=batch['id'].tolist(), vectors=batch['vector'].transform(lambda x: x.tolist()).tolist(), attributes=attributes ) # time_diff = time.monotonic() - start # print(f"Batch {columns.ids[0]}..{columns.ids[-1]} begin:", time_diff, '/', len(batch), '=', len(batch)/time_diff) # before = time.monotonic() # print(columns) self.upsert(columns) # time_diff = time.monotonic() - before # print(f"Batch {columns.ids[0]}..{columns.ids[-1]} time:", time_diff, '/', len(batch), '=', len(batch)/time_diff) # start = time.monotonic() elif isinstance(data, Iterable): # start = time.monotonic() for batch in batch_iter(data, tpuf.upsert_batch_size): # time_diff = time.monotonic() - start # print('Batch begin:', time_diff, '/', len(batch), '=', len(batch)/time_diff) # before = time.monotonic() self.upsert(batch) # time_diff = time.monotonic() - before # print('Batch time:', time_diff, '/', len(batch), '=', len(batch)/time_diff) # start = time.monotonic() return else: 
raise ValueError(f'Unsupported data type: {type(data)}') assert response.get('status', '') == 'OK', f'Invalid upsert() response: {response}' def delete(self, ids: Union[int, str, List[int], List[str]]) -> None: """ Deletes vectors by id. """ if isinstance(ids, int) or isinstance(ids, str): response = self.backend.make_api_request('vectors', self.name, payload={ 'ids': [ids], 'vectors': [None], }) elif isinstance(ids, list): response = self.backend.make_api_request('vectors', self.name, payload={ 'ids': ids, 'vectors': [None] * len(ids), }) else: raise ValueError(f'Unsupported ids type: {type(ids)}') assert response.get('status', '') == 'OK', f'Invalid delete() response: {response}' @overload def query(self, vector: Optional[List[float]] = None, distance_metric: Optional[str] = None, top_k: int = 10, include_vectors: bool = False, include_attributes: Optional[Union[List[str], bool]] = None,
class Namespace: """ The Namespace type represents a set of vectors stored in turbopuffer. Within a namespace, vectors are uniquely referred to by their ID. All vectors in a namespace must have the same dimensions. """ name: str backend: Backend def __init__(self, name: str, api_key: Optional[str] = None): """ Creates a new turbopuffer.Namespace object for querying the turbopuffer API. This function does not make any API calls on its own. Specifying an api_key here will override the global configuration for API calls to this namespace. """ self.name = name self.backend = Backend(api_key) def __str__(self) -> str: return f'tpuf-namespace:{self.name}' @overload def upsert(self, ids: Union[List[int], List[str]], vectors: List[List[float]], attributes: Optional[Dict[str, List[Optional[str]]]] = None) -> None: """ Creates or updates multiple vectors provided in a column-oriented layout. If this call succeeds, data is guaranteed to be durably written to object storage. Upserting a vector will overwrite any existing vector with the same ID. """ ... @overload def upsert(self, data: Union[dict, VectorColumns]) -> None: """ Creates or updates multiple vectors provided in a column-oriented layout. If this call succeeds, data is guaranteed to be durably written to object storage. Upserting a vector will overwrite any existing vector with the same ID. """ ... @overload def upsert(self, data: Union[Iterable[dict], Iterable[VectorRow]]) -> None: """ Creates or updates a multiple vectors provided as a list or iterator. If this call succeeds, data is guaranteed to be durably written to object storage. Upserting a vector will overwrite any existing vector with the same ID. """ ... @overload def upsert(self, data: VectorResult) -> None: """ Creates or updates multiple vectors. If this call succeeds, data is guaranteed to be durably written to object storage. Upserting a vector will overwrite any existing vector with the same ID. """ ... def upsert(self, data=None, ids=None, vectors=None, attributes=None) -> None: if data is None: if ids is not None and vectors is not None: return self.upsert(VectorColumns(ids=ids, vectors=vectors, attributes=attributes)) else: raise ValueError('upsert() requires both ids= and vectors= be set.') elif isinstance(data, VectorColumns): # "if None in data.vectors:" is not supported because data.vectors might be a list of np.ndarray # None == pd.ndarray is an ambiguous comparison in this case. 
for vec in data.vectors: if vec is None: raise ValueError('upsert() call would result in a vector deletion, use Namespace.delete([ids...]) instead.') response = self.backend.make_api_request('vectors', self.name, payload=data.__dict__) elif isinstance(data, VectorRow): raise ValueError('upsert() should be called on a list of vectors, got single vector.') elif isinstance(data, list): if isinstance(data[0], dict): return self.upsert(VectorColumns.from_rows(data)) elif isinstance(data[0], VectorRow): return self.upsert(VectorColumns.from_rows(data)) elif isinstance(data[0], VectorColumns): for columns in data: self.upsert(columns) return else: raise ValueError(f'Unsupported list data type: {type(data[0])}') elif isinstance(data, dict): if 'id' in data: raise ValueError('upsert() should be called on a list of vectors, got single vector.') elif 'ids' in data: return self.upsert(VectorColumns.from_dict(data)) else: raise ValueError('Provided dict is missing ids.') elif 'pandas' in sys.modules and isinstance(data, sys.modules['pandas'].DataFrame): if 'id' not in data.keys(): raise ValueError('Provided pd.DataFrame is missing an id column.') if 'vector' not in data.keys(): raise ValueError('Provided pd.DataFrame is missing a vector column.') # start = time.monotonic() for i in range(0, len(data), tpuf.upsert_batch_size): batch = data[i:i+tpuf.upsert_batch_size] attributes = dict() for key, values in batch.items(): if key != 'id' and key != 'vector': attributes[key] = values.tolist() columns = tpuf.VectorColumns( ids=batch['id'].tolist(), vectors=batch['vector'].transform(lambda x: x.tolist()).tolist(), attributes=attributes ) # time_diff = time.monotonic() - start # print(f"Batch {columns.ids[0]}..{columns.ids[-1]} begin:", time_diff, '/', len(batch), '=', len(batch)/time_diff) # before = time.monotonic() # print(columns) self.upsert(columns) # time_diff = time.monotonic() - before # print(f"Batch {columns.ids[0]}..{columns.ids[-1]} time:", time_diff, '/', len(batch), '=', len(batch)/time_diff) # start = time.monotonic() elif isinstance(data, Iterable): # start = time.monotonic() for batch in batch_iter(data, tpuf.upsert_batch_size): # time_diff = time.monotonic() - start # print('Batch begin:', time_diff, '/', len(batch), '=', len(batch)/time_diff) # before = time.monotonic() self.upsert(batch) # time_diff = time.monotonic() - before # print('Batch time:', time_diff, '/', len(batch), '=', len(batch)/time_diff) # start = time.monotonic() return else: raise ValueError(f'Unsupported data type: {type(data)}') assert response.get('status', '') == 'OK', f'Invalid upsert() response: {response}' def delete(self, ids: Union[int, str, List[int], List[str]]) -> None: """ Deletes vectors by id. """ if isinstance(ids, int) or isinstance(ids, str): response = self.backend.make_api_request('vectors', self.name, payload={ 'ids': [ids], 'vectors': [None], }) elif isinstance(ids, list): response = self.backend.make_api_request('vectors', self.name, payload={ 'ids': ids, 'vectors': [None] * len(ids), }) else: raise ValueError(f'Unsupported ids type: {type(ids)}') assert response.get('status', '') == 'OK', f'Invalid delete() response: {response}' @overload def query(self, vector: Optional[List[float]] = None, distance_metric: Optional[str] = None, top_k: int = 10, include_vectors: bool = False, include_attributes: Optional[Union[List[str], bool]] = None,
filters: Optional[Dict[str, List[FilterTuple]]] = None) -> VectorResult:
6
2023-12-12 06:52:27+00:00
8k
Prismadic/magnet
magnet/utils/mlx/mistral.py
[ { "identifier": "_f", "path": "magnet/utils/globals.py", "snippet": "def _f(\n tag: str = None,\n body: any = None,\n no_print: bool = False,\n luxe: bool = False\n):\n \"\"\"\n The `_f` function is a logging utility that prints messages with different tags and colors based on\n the provided parameters.\n\n :param tag: The `tag` parameter is a string that represents the tag for the log message. It can be\n one of the following values: \"FATAL\", \"WARN\", \"INFO\", \"WAIT\", or \"SUCCESS\"\n :type tag: str\n :param body: The `body` parameter is used to specify the message or content that you want to\n display. It can be of any type\n :type body: any\n :param no_print: The `no_print` parameter is a boolean flag that determines whether the output\n should be printed or returned as a string.\n the formatted string without printing it. If `no_print` is set to `False` (default)\n :type no_print: bool (optional)\n :param luxe: The `luxe` parameter is a boolean flag that determines whether to use a more luxurious\n and colorful output format. If `luxe` is set to `True`, the output will include random colors,\n emojis, and matrix-like characters.\n :type luxe: bool (optional)\n :return: The function `_f` returns a formatted string if the `no_print` parameter is set to `True`.\n If `no_print` is `False`, the function prints the formatted string and returns `None`.\n \"\"\"\n tags = [\n (\"FATAL\", \"☠️\", \"\\033[91m\"), # Red color for FATAL\n (\"WARN\", \"🚨\", \"\\033[93m\"), # Yellow color for WARN\n (\"INFO\", \"ℹ️\", \"\\033[94m\"), # Blue color for INFO\n (\"WAIT\", \"☕️\", \"\\033[96m\"), # Cyan color for WAIT\n (\"SUCCESS\", \"🌊\", \"\\033[92m\"), # Green color for SUCCESS\n ]\n _luxe = [\n \"\\033[31m\",\n \"\\033[32m\",\n \"\\033[33m\",\n \"\\033[34m\",\n \"\\033[35m\",\n \"\\033[36m\",\n \"\\033[91m\",\n \"\\033[92m\",\n \"\\033[93m\",\n \"\\033[94m\",\n \"\\033[95m\",\n \"\\033[96m\",\n ]\n _matrix = [\"⣾\", \"⣽\", \"⣻\", \"⢿\", \"⡿\", \"⣟\", \"⣯\", \"⣷\"]\n _joy = [\n \"🍤\",\n \"🌈\",\n \"📊\",\n \"🏁\",\n \"🌊\",\n \"🧠\",\n \"✨\",\n \"🧮\",\n \"🎉\",\n \"🥳\",\n \"🤩\",\n \"🐈\",\n \"❤️\",\n \"💙\",\n \"💜\",\n \"💚\",\n \"💛\",\n \"🧡\",\n \"⭐️\",\n ]\n matching_tags = [x for x in tags if x[0] == tag.upper()]\n if matching_tags:\n tag_text = matching_tags[0][0]\n emoji = matching_tags[0][1]\n color_code = matching_tags[0][2]\n if luxe:\n return (\n f\"{_luxe[random.randint(0,len(_luxe)-1)]} {_joy[random.randint(0,len(_joy)-1)]} {_matrix[random.randint(0,len(_matrix)-1)]}: {body}\\033[0m\"\n if no_print\n else print(\n f\"{_luxe[random.randint(0,len(_luxe)-1)]} {_joy[random.randint(0,len(_joy)-1)]} {_matrix[random.randint(0,len(_matrix)-1)]}: {body}\\033[0m\"\n )\n )\n else:\n return (\n f\"{color_code} {emoji} {tag_text}: {body}\\033[0m\"\n if no_print\n else print(f\"{color_code}{emoji} {tag_text}: {body}\\033[0m\")\n )\n else:\n print(f\"😭 UNKNOWN TAG - `{tag}`\")" }, { "identifier": "MistralArgs", "path": "magnet/utils/data_classes.py", "snippet": "class MistralArgs:\n \"\"\"\n Represents a set of arguments for the Mistral model.\n\n Args:\n dim (int): The dimensionality of the model.\n n_layers (int): The number of layers in the model.\n head_dim (int): The dimensionality of each attention head.\n hidden_dim (int): The dimensionality of the hidden layer in the feed-forward network.\n n_heads (int): The number of attention heads.\n n_kv_heads (int): The number of attention heads used for key-value attention.\n norm_eps (float): The epsilon value used for numerical stability in layer 
normalization.\n vocab_size (int): The size of the vocabulary used in the model.\n \"\"\"\n\n dim: int\n n_layers: int\n head_dim: int\n hidden_dim: int\n n_heads: int\n n_kv_heads: int\n norm_eps: float\n vocab_size: int" } ]
import json
import time
import mlx.core as mx
import mlx.nn as nn
from dataclasses import dataclass
from pathlib import Path
from typing import List, Optional, Tuple
from mlx.utils import tree_unflatten
from sentencepiece import SentencePieceProcessor
from magnet.utils.globals import _f
from magnet.utils.data_classes import MistralArgs
3,903
@property def eos_id(self) -> int: """ Returns the ID of the end-of-sentence token in the tokenizer's model. Returns: int: The ID of the end-of-sentence token in the tokenizer's model. """ return self._model.eos_id() @property def pad_id(self) -> int: """ Returns the ID of the padding token in the tokenizer's model. Returns: int: The ID of the padding token in the tokenizer's model. """ return self._model.pad_id() def encode(self, s: str) -> List[int]: return [self._model.bos_id(), *self._model.encode(s)] def decode(self, t: List[int]) -> str: """ Decodes a list of token IDs into a string. Args: t (List[int]): A list of token IDs to be decoded into a string. Returns: str: The decoded string corresponding to the input list of token IDs. """ out = self._model.decode(t) if t and self._model.id_to_piece(t[0])[0] == self._sep: return " " + out return out def load_model(folder: str): """ Load a pre-trained language model and tokenizer from a specified folder. Args: folder (str): The path to the folder containing the pre-trained model. Returns: model (Mistral): The loaded pre-trained language model. tokenizer (Tokenizer): The initialized tokenizer. """ model_path = Path(folder) tokenizer = Tokenizer(str(model_path / "tokenizer.model")) with open(model_path / "config.json", "r") as f: config = json.loads(f.read()) config.pop("sliding_window", None) config.pop("model_type", None) quantization = config.pop("quantization", None) model_args = MistralArgs(**config) weights = mx.load(str(model_path / "weights.npz")) weights = tree_unflatten(list(weights.items())) model = Mistral(model_args) if quantization is not None: nn.QuantizedLinear.quantize_module(model, **quantization) model.update(weights) mx.eval(model.parameters()) return model, tokenizer def infer(prompt: mx.array, model: Mistral, temp: Optional[float] = 0.0): """ Generates a sequence of tokens using a pre-trained language model. Args: prompt (mx.array): An mxnet array representing the initial prompt for generating the sequence. model (Mistral): An instance of the Mistral class, which is a pre-trained language model. temp (float, optional): A float representing the temperature parameter for controlling the randomness of the generated sequence. Defaults to 0.0. Yields: mx.array: Generated tokens, one by one. Example: prompt = mx.array(tokenizer.encode("The cat")) model = Mistral(args) temp = 0.8 for token in infer(prompt, model, temp): print(token) """ def sample(logits): if temp == 0: return mx.argmax(logits, axis=-1) else: return mx.random.categorical(logits * (1 / temp)) logits, cache = model(prompt[None]) y = sample(logits[:, -1, :]) yield y while True: logits, cache = model(y[:, None], cache) y = sample(logits.squeeze(1)) yield y def generate(payload): """ Generate a sequence of tokens using a pre-trained language model. Args: payload (dict): A dictionary containing the following keys: - 'seed' (int): The random seed for reproducibility. - 'model_path' (str): The path to the pre-trained model. - 'prompt' (str): The initial prompt for generating the sequence. - 'temp' (float): The temperature parameter for controlling the randomness of the generated sequence. - 'max_tokens' (int): The maximum number of tokens to generate. Returns: str: The generated sequence of tokens decoded into a string. """ mx.random.seed(payload['seed'])
# Copyright © 2023 Apple Inc. # docstrings - 2023 Prismadic, LLC. class RMSNorm(nn.Module): def __init__(self, dims: int, eps: float = 1e-5): """ Initializes the attributes of the RMSNorm class. Args: dims (int): The number of dimensions for the weight attribute. eps (float, optional): The epsilon value for numerical stability. Defaults to 1e-5. Returns: None """ super(RMSNorm, self).__init__() self.dims = dims self.eps = eps def forward(self, x): """ Applies RMS normalization to the input array. Args: x (torch.Tensor): The input array to be normalized. Returns: torch.Tensor: The normalized array. """ return x / torch.sqrt(torch.mean(x**2, dim=self.dims, keepdim=True) + self.eps) super().__init__() self.weight = mx.ones((dims,)) self.eps = eps def _norm(self, x): return x * mx.rsqrt(x.square().mean(-1, keepdims=True) + self.eps) def __call__(self, x): """ Apply RMS normalization to the input array `x` and return the normalized output. Args: x (ndarray): The input array to be normalized. Returns: ndarray: The normalized output array, which is the result of applying RMS normalization to the input array `x`. """ output = self._norm(x.astype(mx.float32)).astype(x.dtype) return self.weight * output class Attention(nn.Module): """ The `Attention` class is responsible for performing the attention computation in a transformer block. Args: args (MistralArgs): An instance of `MistralArgs` that contains the arguments for the attention computation. Attributes: args (MistralArgs): An instance of `MistralArgs` that contains the arguments for the attention computation. n_heads (int): The number of attention heads. n_kv_heads (int): The number of key-value attention heads. repeats (int): The number of times to repeat the key-value attention heads. scale (float): The scaling factor for the attention scores. wq (nn.Linear): Linear layer for the query projection. wk (nn.Linear): Linear layer for the key projection. wv (nn.Linear): Linear layer for the value projection. wo (nn.Linear): Linear layer for the output projection. rope (nn.RoPE): Instance of `nn.RoPE` class for relative positional encoding. """ def __init__(self, args: MistralArgs): super().__init__() self.args = args self.n_heads: int = args.n_heads self.n_kv_heads: int = args.n_kv_heads self.repeats = self.n_heads // self.n_kv_heads self.scale = self.args.head_dim**-0.5 self.wq = nn.Linear(args.dim, args.n_heads * args.head_dim, bias=False) self.wk = nn.Linear(args.dim, args.n_kv_heads * args.head_dim, bias=False) self.wv = nn.Linear(args.dim, args.n_kv_heads * args.head_dim, bias=False) self.wo = nn.Linear(args.n_heads * args.head_dim, args.dim, bias=False) self.rope = nn.RoPE(args.head_dim, traditional=True) def __call__( self, x: mx.array, mask: Optional[mx.array] = None, cache: Optional[Tuple[mx.array, mx.array]] = None, ) -> mx.array: """ Perform attention computation on the input array `x`. Args: x (mx.array): The input array of shape (batch_size, sequence_length, dimension). mask (Optional[mx.array]): An optional mask array of shape (batch_size, sequence_length) to mask certain elements in the input array. cache (Optional[Tuple[mx.array, mx.array]]): An optional cache tuple containing two arrays of shape (batch_size, sequence_length, dimension) to store intermediate results. Returns: mx.array: The final output array of shape (batch_size, sequence_length, dimension). Optional[Tuple[mx.array, mx.array]]: The updated cache tuple containing two arrays of shape (batch_size, sequence_length, dimension). 
""" B, L, D = x.shape queries, keys, values = self.wq(x), self.wk(x), self.wv(x) # Prepare the queries, keys and values for the attention computation queries = queries.reshape(B, L, self.n_heads, -1).transpose(0, 2, 1, 3) keys = keys.reshape(B, L, self.n_kv_heads, -1).transpose(0, 2, 1, 3) values = values.reshape( B, L, self.n_kv_heads, -1).transpose(0, 2, 1, 3) def repeat(a): a = mx.concatenate([mx.expand_dims(a, 2)] * self.repeats, axis=2) return a.reshape([B, self.n_heads, L, -1]) keys, values = map(repeat, (keys, values)) if cache is not None: key_cache, value_cache = cache queries = self.rope(queries, offset=key_cache.shape[2]) keys = self.rope(keys, offset=key_cache.shape[2]) keys = mx.concatenate([key_cache, keys], axis=2) values = mx.concatenate([value_cache, values], axis=2) else: queries = self.rope(queries) keys = self.rope(keys) scores = (queries * self.scale) @ keys.transpose(0, 1, 3, 2) if mask is not None: scores += mask scores = mx.softmax(scores.astype(mx.float32), axis=-1).astype(scores.dtype) output = (scores @ values).transpose(0, 2, 1, 3).reshape(B, L, -1) return self.wo(output), (keys, values) class FeedForward(nn.Module): """ Applies a feed-forward neural network to the input data. Args: args (MistralArgs): The arguments for the model. Example Usage: args = MistralArgs(dim=512, hidden_dim=2048) feed_forward = FeedForward(args) output = feed_forward(input) """ def __init__(self, args: MistralArgs): """ Initializes the FeedForward class. Args: args (MistralArgs): The arguments for the model. """ super().__init__() self.w1 = nn.Linear(args.dim, args.hidden_dim, bias=False) self.w2 = nn.Linear(args.hidden_dim, args.dim, bias=False) self.w3 = nn.Linear(args.dim, args.hidden_dim, bias=False) def __call__(self, x) -> mx.array: """ Applies the feed-forward neural network to the input data. Args: x: The input data. Returns: mx.array: The output of the feed-forward neural network. """ return self.w2(nn.silu(self.w1(x)) * self.w3(x)) class TransformerBlock(nn.Module): """ Initializes the attributes of the TransformerBlock class and creates instances of other required classes. Args: args (MistralArgs): An instance of the MistralArgs class that contains the arguments for the transformer block. Example Usage: args = MistralArgs(dim=512, n_heads=8, norm_eps=1e-5) block = TransformerBlock(args) Flow: 1. Initialize the n_heads attribute of the TransformerBlock instance with the value from args.n_heads. 2. Initialize the dim attribute of the TransformerBlock instance with the value from args.dim. 3. Create an instance of the Attention class and assign it to the attention attribute of the TransformerBlock instance, passing args as an argument. 4. Create an instance of the FeedForward class and assign it to the feed_forward attribute of the TransformerBlock instance, passing args as an argument. 5. Create an instance of the RMSNorm class and assign it to the attention_norm attribute of the TransformerBlock instance, passing args.dim and args.norm_eps as arguments. 6. Create an instance of the RMSNorm class and assign it to the ffn_norm attribute of the TransformerBlock instance, passing args.dim and args.norm_eps as arguments. 7. Assign the args argument to the args attribute of the TransformerBlock instance. 
Returns: None """ def __init__(self, args: MistralArgs): super().__init__() self.n_heads = args.n_heads self.dim = args.dim self.attention = Attention(args) self.feed_forward = FeedForward(args=args) self.attention_norm = RMSNorm(args.dim, eps=args.norm_eps) self.ffn_norm = RMSNorm(args.dim, eps=args.norm_eps) self.args = args def __call__( self, x: mx.array, mask: Optional[mx.array] = None, cache: Optional[Tuple[mx.array, mx.array]] = None, ) -> mx.array: """ Apply the TransformerBlock to the input array. Args: x (mx.array): The input array of shape (batch_size, sequence_length). mask (Optional[mx.array]): An optional mask array of shape (batch_size, sequence_length) to mask certain elements in the input array. cache (Optional[Tuple[mx.array, mx.array]]): An optional cache tuple containing two arrays of shape (batch_size, sequence_length, hidden_dim) to store intermediate results. Returns: out (mx.array): The final output array of shape (batch_size, sequence_length, hidden_dim). cache (Optional[Tuple[mx.array, mx.array]]): The updated cache tuple containing two arrays of shape (batch_size, sequence_length, hidden_dim). """ r, cache = self.attention(self.attention_norm(x), mask, cache) h = x + r r = self.feed_forward(self.ffn_norm(h)) out = h + r return out, cache class Mistral(nn.Module): """ A language model that performs a series of operations on an input array using transformer blocks. Args: args (MistralArgs): The model arguments that define the dimensions and parameters of the language model. Attributes: args (MistralArgs): The model arguments that define the dimensions and parameters of the language model. vocab_size (int): The size of the vocabulary. n_layers (int): The number of transformer blocks in the language model. tok_embeddings (nn.Embedding): The token embedding layer. layers (List[TransformerBlock]): The list of transformer blocks. norm (RMSNorm): The RMS normalization layer. output (nn.Linear): The output layer. """ def __init__(self, args: MistralArgs): super().__init__() self.args = args self.vocab_size = args.vocab_size self.n_layers = args.n_layers assert self.vocab_size > 0 self.tok_embeddings = nn.Embedding(args.vocab_size, args.dim) self.layers = [TransformerBlock(args=args) for _ in range(args.n_layers)] self.norm = RMSNorm(args.dim, eps=args.norm_eps) self.output = nn.Linear(args.dim, args.vocab_size, bias=False) def __call__(self, inputs: mx.array, cache=None): """ Perform a series of operations on the input array using the layers defined in the class. Args: inputs (mx.array): An array representing the input data. It should have shape (batch_size, sequence_length). cache (list, optional): The cache value for each layer. Default is None. Returns: mx.array: The output array after passing through the output layer and applying normalization. It has shape (batch_size, sequence_length, vocab_size). list: The updated cache after processing the input array through the layers. It is a list of length equal to the number of layers in the model, where each element is the cache value for the corresponding layer. 
""" h = self.tok_embeddings(inputs) mask = None if h.shape[1] > 1: mask = nn.MultiHeadAttention.create_additive_causal_mask( h.shape[1]) mask = mask.astype(h.dtype) # Rest of the code remains the same if cache is None: cache = [None] * len(self.layers) for e, layer in enumerate(self.layers): h, cache[e] = layer(h, mask, cache[e]) return self.output(self.norm(h)), cache class Tokenizer: """ Initializes the tokenizer object by loading a SentencePiece model from a given file path and setting the separator character. Args: model_path (str): The file path of the SentencePiece model. Raises: AssertionError: If the file specified by `model_path` does not exist. AssertionError: If the vocabulary size of the model does not match the number of pieces in the model. """ def __init__(self, model_path: str): assert Path(model_path).exists(), model_path self._model = SentencePieceProcessor(model_file=model_path) self._sep = "▁" assert self._model.vocab_size() == self._model.get_piece_size() @property def eos_id(self) -> int: """ Returns the ID of the end-of-sentence token in the tokenizer's model. Returns: int: The ID of the end-of-sentence token in the tokenizer's model. """ return self._model.eos_id() @property def pad_id(self) -> int: """ Returns the ID of the padding token in the tokenizer's model. Returns: int: The ID of the padding token in the tokenizer's model. """ return self._model.pad_id() def encode(self, s: str) -> List[int]: return [self._model.bos_id(), *self._model.encode(s)] def decode(self, t: List[int]) -> str: """ Decodes a list of token IDs into a string. Args: t (List[int]): A list of token IDs to be decoded into a string. Returns: str: The decoded string corresponding to the input list of token IDs. """ out = self._model.decode(t) if t and self._model.id_to_piece(t[0])[0] == self._sep: return " " + out return out def load_model(folder: str): """ Load a pre-trained language model and tokenizer from a specified folder. Args: folder (str): The path to the folder containing the pre-trained model. Returns: model (Mistral): The loaded pre-trained language model. tokenizer (Tokenizer): The initialized tokenizer. """ model_path = Path(folder) tokenizer = Tokenizer(str(model_path / "tokenizer.model")) with open(model_path / "config.json", "r") as f: config = json.loads(f.read()) config.pop("sliding_window", None) config.pop("model_type", None) quantization = config.pop("quantization", None) model_args = MistralArgs(**config) weights = mx.load(str(model_path / "weights.npz")) weights = tree_unflatten(list(weights.items())) model = Mistral(model_args) if quantization is not None: nn.QuantizedLinear.quantize_module(model, **quantization) model.update(weights) mx.eval(model.parameters()) return model, tokenizer def infer(prompt: mx.array, model: Mistral, temp: Optional[float] = 0.0): """ Generates a sequence of tokens using a pre-trained language model. Args: prompt (mx.array): An mxnet array representing the initial prompt for generating the sequence. model (Mistral): An instance of the Mistral class, which is a pre-trained language model. temp (float, optional): A float representing the temperature parameter for controlling the randomness of the generated sequence. Defaults to 0.0. Yields: mx.array: Generated tokens, one by one. 
Example: prompt = mx.array(tokenizer.encode("The cat")) model = Mistral(args) temp = 0.8 for token in infer(prompt, model, temp): print(token) """ def sample(logits): if temp == 0: return mx.argmax(logits, axis=-1) else: return mx.random.categorical(logits * (1 / temp)) logits, cache = model(prompt[None]) y = sample(logits[:, -1, :]) yield y while True: logits, cache = model(y[:, None], cache) y = sample(logits.squeeze(1)) yield y def generate(payload): """ Generate a sequence of tokens using a pre-trained language model. Args: payload (dict): A dictionary containing the following keys: - 'seed' (int): The random seed for reproducibility. - 'model_path' (str): The path to the pre-trained model. - 'prompt' (str): The initial prompt for generating the sequence. - 'temp' (float): The temperature parameter for controlling the randomness of the generated sequence. - 'max_tokens' (int): The maximum number of tokens to generate. Returns: str: The generated sequence of tokens decoded into a string. """ mx.random.seed(payload['seed'])
_f('wait', f"loading model from {payload['model_path']}")
0
2023-12-12 14:11:21+00:00
8k
happyapplehorse/ai-care
src/ai_care/ai_care.py
[ { "identifier": "Ability", "path": "src/ai_care/abilities.py", "snippet": "class Ability:\n def __init__(self, ai_care: AICare):\n self.ai_care = ai_care\n self.abilities: dict = {}\n self._register_abilities()\n\n def _register_abilities(self) -> None:\n for name, method in inspect.getmembers(self, predicate=inspect.ismethod):\n if getattr(method, '_ability_', False):\n self.abilities[name] = method\n\n @_ability(\n description=\"Remain silent.\",\n )\n def stay_silent(self) -> None:\n return\n \n @_ability(\n description=\"Speak to the user right now.\",\n )\n @_ability_parameter(\n name=\"content\",\n description=\"The content you want to say.\",\n )\n def speak_now(self, content: str | Generator[str, None, None]) -> None:\n self.ai_care.to_user_method(content)\n\n @_ability(\n description=(\n \"Set a message to be delivered to the user after a certain period of time. \"\n \"This choice means you decide to observe for a duration, and if the user does \"\n \"not speak to you during this time, you will then convey the message content you have set. \"\n \"However, if the user speaks to you within this time frame, the operation will automatically \"\n \"be cancelled. This option is recommended.\"\n )\n )\n @_ability_parameter(\n name=\"delay\",\n description=\"Set how long until your message is sent. Unit in seconds.\",\n )\n @_ability_parameter(\n name=\"message\",\n description=\"Set what you want to say to the user after a certain period of time.\",\n )\n def speak_after(self, delay: float | int, message: str) -> None:\n if self.ai_care._stream_mode is True:\n message_wrap = (string for string in [message])\n else:\n message_wrap = message\n self.ai_care.set_timer(interval=delay, function=self.ai_care.to_user_method, args=(message_wrap,))\n \n @_ability(\n description=(\n \"Detect environmental conditions. This choice means that you decide to first obtain the results \"\n \"from the sensors, observe the environmental situation, and then decide what to choose based on \"\n \"this information. You can only choose which sensors to use from the list of available sensors.\"\n ),\n )\n @_ability_parameter(\n name=\"delay\",\n description=\"Set how long to wait before using sensors to obtain readings. Unit in seconds.\",\n )\n @_ability_parameter(\n name=\"sensors\",\n description=\"The list of names of the sensors to be used.\",\n param_type=\"list[str]\",\n )\n @_auto_depth(depth_param_name=\"_depth_left\")\n def detect_env(self, delay: float | int, sensors: list[str], _depth_left: int) -> None:\n def detect_env_callback(sensors_list: list[str]):\n sensor_data = {}\n for sensor in sensors_list:\n data = self.ai_care.get_sensor_data(sensor)\n sensor_data[sensor] = data\n self.ai_care.ask(\n messages_list=[\n {\n \"role\": \"ai_care\",\n \"content\": f\"The results of the sensor are as follows: {str(sensor_data)}.\",\n },\n ],\n depth_left = _depth_left - 1,\n )\n not_existed_sensors_set = set(sensors) - set(self.ai_care.sensors)\n if not_existed_sensors_set:\n self.ai_care.ask(\n messages_list=[\n {\n \"role\": \"ai_care\",\n \"content\": f\"There are no {str(not_existed_sensors_set)} sensor. Please use the correct sensor name.\",\n }\n ],\n depth_left = _depth_left - 1,\n )\n return\n self.ai_care.set_timer(\n interval=delay,\n function=detect_env_callback,\n args=(sensors,),\n )\n\n @_ability(\n description=(\n \"Release detectors. 
This option means you forego making an active choice and instead release \"\n \"some detectors, which will automatically report back to you and ask for your decision when \"\n \"specific conditions are met. You can only choose which detectors to use from the list of \"\n \"available detectors.\"\n ),\n )\n @_ability_parameter(\n name=\"delay\",\n description=\"Set the time in seconds before releasing the detectors.\",\n )\n @_ability_parameter(\n name=\"detectors\",\n description=\"The list of names of the detectors to be released.\",\n )\n @_auto_depth(depth_param_name=\"_depth_left\")\n def release_detector(self, delay: int | float, detectors: list[str], _depth_left: int) -> None:\n def release_detector_callback(detectors_list: list[str]):\n self.ai_care.release_detector(detectors_list)\n not_existed_detectors_set = set(detectors) - set(self.ai_care.detectors)\n if not_existed_detectors_set:\n self.ai_care.ask(\n messages_list=[\n {\n \"role\": \"ai_care\",\n \"content\": f\"There are no {str(not_existed_detectors_set)} detector. Please use the correct detector name.\",\n }\n ],\n depth_left = _depth_left - 1,\n )\n return\n self.ai_care.set_timer(interval=delay, function=release_detector_callback, args=(detectors,))\n\n @_ability(\n description=(\n \"Ask again after a while. This choice means that you do not want to make a decision right now, \"\n \"and you would like Aicarey to ask you again later.\"\n ),\n )\n @_ability_parameter(\n name=\"delay\",\n description=\"Set how long to wait before asking you again, in seconds.\",\n )\n @_auto_depth(depth_param_name=\"_depth_left\")\n def ask_later(self, delay: int | float, _depth_left: int) -> None:\n if self.ai_care._ask_later_count_left <= 0:\n return\n self.ai_care._ask_later_count_left -= 1\n self.ai_care.set_timer(\n interval=delay,\n function=self.ai_care.ask,\n kwargs={\n \"messages_list\": [\n {\n \"role\": \"ai_care\",\n \"content\": (\n f\"{delay} senconds ago, you asked me to inquire later, \"\n f\"and now {delay} seconds have passed. Please make a choice.\"\n )\n }\n ],\n \"depth_left\": _depth_left - 1,\n },\n )\n\n @_ability(\n description=(\n \"Cycle release of detectors. This option means that you forgo making an active choice \"\n \"and instead continuously release some detectors, which will automatically report back \"\n \"to you and ask for your decision when specific conditions are met. The detectors you \"\n \"select will be released periodically at set time intervals until the next conversation \"\n \"is initiated. All chosen detectors will be released in each cycle.\"\n ),\n )\n @_ability_parameter(\n name=\"interval\",\n description=\"Set the time interval between each release of the detectors. Unit in seconds\",\n )\n @_ability_parameter(\n name=\"detectors\",\n description=\"The list of names of the detectors to be released.\",\n )\n @_auto_depth(depth_param_name=\"_depth_left\")\n def cyclic_detection(self, interval: int | float, detectors: list[str], _depth_left) -> None:\n not_existed_detectors_set = set(detectors) - set(self.ai_care.detectors)\n if not_existed_detectors_set:\n self.ai_care.ask(\n messages_list=[\n {\n \"role\": \"ai_care\",\n \"content\": f\"There are no {str(not_existed_detectors_set)} detector. 
Please use the correct detector name.\",\n }\n ],\n depth_left = _depth_left - 1,\n )\n return\n def repeat_interval(interval: float):\n while True:\n yield interval\n self.ai_care.set_cyclic_detection(\n detectors=detectors,\n interval_gen=repeat_interval(float(interval)),\n cancel_after_trigger_ask=True\n )" }, { "identifier": "choice_execute", "path": "src/ai_care/choice_execute.py", "snippet": "def choice_execute(ai_care: AICare, choice_code: str, content: str | Generator[str, None, None], depth_left: int) -> None:\n try:\n choice = Choice(choice_code)\n except ValueError as e:\n logger.warning(f\"Invalid choice {choice_code}.\")\n ai_care.ask(\n messages_list=[\n {\n \"role\": \"assistant\",\n \"content\": f\"AA00{choice_code}{choice_code}:{content}\",\n },\n {\n \"role\": \"ai_care\",\n \"content\": f\"Your choice code {choice_code} is not correct. Please make a correct choice again.\",\n },\n ],\n depth_left = depth_left - 1,\n )\n return\n\n if choice == Choice.ERROR:\n ai_care.ask(messages_list=[], depth_left = depth_left - 1)\n return\n logger.info(f\"Choice: {choice.name}\")\n\n if isinstance(content, str):\n ai_care._ask_context.append(\n {\n \"role\": \"assistant\",\n \"content\": f\"AA00{choice_code}{choice_code}:{content}\",\n }\n )\n elif isinstance(content, Generator):\n # This case has been handled in parse_response.\n pass\n else:\n assert False\n\n ability_method = ai_care.ability.abilities[choice.name.lower()]\n \n if choice == Choice.STAY_SILENT:\n ability_method()\n return\n \n if choice == Choice.SPEAK_NOW:\n params = {\"content\": content}\n else:\n assert isinstance(content, str)\n try:\n params = json.loads(content)\n except json.JSONDecodeError as e:\n logger.warning(f\"Failed to correctly parse the parameter. Parameter json string: {content}. Error: {e}.\")\n ai_care.ask(\n messages_list=[\n {\n \"role\": \"ai_care\",\n \"content\": (\n \"Failed to correctly parse the parameter. 
\"\n \"Please send the correct parameters in JSON format, \"\n \"or make a choice again.\"\n ),\n },\n ],\n depth_left = depth_left - 1,\n )\n return\n if not isinstance(params, dict):\n ai_care.ask(\n messages_list=[\n {\n \"role\": \"ai_care\",\n \"content\": (\n \"The parameters should be a dictionary in JSON format.\"\n \"Please send the correct parameters in JSON format, or make a choice again.\"\n ),\n },\n ],\n depth_left = depth_left - 1,\n )\n return\n\n ability_params = {}\n for param in ability_method._ability_parameters_:\n default_value = param[\"default_value\"]\n if param[\"required\"] is False:\n ability_params[param[\"name\"]] = default_value\n ability_params.update(params)\n if getattr(ability_method, \"_auto_depth_\", False):\n ability_params[ability_method._depth_param_name_] = depth_left\n\n needed_params_set = {param[\"name\"] for param in ability_method._ability_parameters_}\n missing_params_set = needed_params_set - set(ability_params.keys())\n\n if missing_params_set:\n ai_care.ask(\n messages_list=[\n {\n \"role\": \"ai_care\",\n \"content\": (\n f\"You did not provide the following parameters: {str(missing_params_set)} .\"\n \"Please send the correct parameters in JSON format, or make a choice again.\"\n ),\n },\n ],\n depth_left = depth_left - 1\n )\n return\n\n logger.info(f\"Choice parameters: {str(ability_params)}\")\n ability_method(**ability_params)" }, { "identifier": "parse_response", "path": "src/ai_care/parse_response.py", "snippet": "def parse_response(\n ai_care: AICare,\n response: str | Generator[str, None, None],\n) -> tuple[str, str | Generator[str, None, None]]:\n choice_code = \"\"\n content = \"\"\n if isinstance(response, str):\n ai_care._stream_mode = False\n prefix, content = _extract_info(response)\n if not all(prefix):\n return '00', ''\n choice_code = prefix[2]\n check_valid = prefix[3]\n if choice_code == check_valid:\n ai_care._valid_msg_count += 1\n else:\n ai_care._invalid_msg_count += 1\n assert isinstance(choice_code, str)\n return choice_code, content or \"\"\n elif isinstance(response, Generator):\n ai_care._stream_mode = True\n buffer = \"\"\n first_item = \"\"\n chunk_list = []\n found_choice = False\n prefix = (None, None, None, None)\n choice = Choice.ERROR\n for chunk in response:\n buffer += chunk\n if not found_choice:\n prefix, content = _extract_info(buffer)\n if all(prefix) and len(buffer) >= 9 and not found_choice:\n choice_code = prefix[2]\n check_valid = prefix[3]\n if choice_code == check_valid:\n ai_care._valid_msg_count += 1\n else:\n ai_care._invalid_msg_count += 1\n try:\n choice = Choice(choice_code)\n except ValueError as e:\n logger.warning(f\"Invalid choice {choice_code}. 
Error: {e}\")\n assert isinstance(choice_code, str)\n return choice_code, ''\n found_choice = True\n if choice == Choice.SPEAK_NOW:\n first_item = buffer[9:]\n break\n chunk_list.append(buffer[9:])\n continue\n if found_choice:\n chunk_list.append(chunk)\n if found_choice is False:\n return '00', ''\n prefix = cast(tuple[str, str, str, str], prefix)\n if choice == Choice.SPEAK_NOW:\n def response_content_gen():\n gen_content_record = []\n yield first_item\n gen_content_record.append(first_item)\n for item in response:\n yield item\n gen_content_record.append(item)\n ai_care._ask_context.append(\n {\n \"role\": \"ai_care\",\n \"content\": ''.join(gen_content_record),\n }\n )\n return choice.value, response_content_gen()\n else:\n return choice.value, ''.join(chunk_list)\n else:\n assert False, \"The response must be str or a generator.\"" }, { "identifier": "render_basic_prompt", "path": "src/ai_care/render_prompt.py", "snippet": "def render_basic_prompt(\n ai_care: AICare,\n inactive_abilities_list: list[Choice] | None = None,\n inactive_sensors_list: list[str] | None = None,\n inactive_detectors_list: list[str] | None = None,\n) -> str:\n inactive_abilities_set = set() if inactive_abilities_list is None else set(inactive_abilities_list)\n inactive_sensors_set = set() if inactive_sensors_list is None else set(inactive_sensors_list)\n inactive_detectors_set = set() if inactive_detectors_list is None else set(inactive_detectors_list)\n if ai_care._ask_later_count_left <= 0:\n inactive_abilities_set.add(Choice.ASK_LATER)\n \n abilities_dict = ai_care.ability.abilities\n \n intervals_info = (\n f\"\"\"The intervals of the last {len(ai_care._chat_intervals)} times the user conversed with you are recorded in the following list (unit in seconds):\n {str(ai_care._chat_intervals)}\n \"\"\" if ai_care._chat_intervals else \"\"\n ) + (\n f\"\"\"It has been {time.monotonic() - ai_care._last_chat_time} seconds since the last time the user spoke with you.\"\"\"\n if ai_care._last_chat_time is not None else \"\"\n )\n\n sorted_abilities = sorted(abilities_dict.values(), key=lambda x: Choice[x.__name__.upper()].value)\n abilities_info = ''.join(_render_ability_description(ability_method).lstrip()\n for ability_method in sorted_abilities\n if ability_method not in inactive_abilities_set\n ).replace('\\n', '\\n ')\n\n prompt = textwrap.dedent(\n f\"\"\"\n I am a program, and my name is Aicarey.\n You can see your conversation history with the user in the previous messages.\n This message is sent by me. Please continue to focus on the user and do not attempt to converse with me.\n You should seriously judge which choice you should make based on the content of your conversation with the user.\n For example, if you are in a question-and-answer mode with the user,\n then you may not need to speak when the user doesn't ask a question.\n However, if you are chatting with the user like a friend,\n then you may need to proactively continue the conversation like a friend when the user doesn't speak.\n\n When replying to this message, you must follow the rules below:\n ========================RESPONSE RULES=======================\n 1. Start with eight characters followed by an English colon.\n The first two characters of these eight must be 'AA', the third and fourth must be '00',\n the fifth and sixth are the code of your choice.\n The seventh and eighth characters should repeat the fifth and sixth characters.\n 2. 
After the colon is the content corresponding to the chosen option.\n If it involves a function call, this part of the content must be in the format of a JSON string.\n =============================================================\n\n Here are the choices you can make:\n ===========================CHOICES===========================\n {abilities_info}\n =============================================================\n You must choose one of the options provided above as your reply.\n \n Response Examples:\n If you want to remain silent: AA000101:\n If you want to say to the user: AA000202: [Your content here]\n If you decide to ask the user what they are doing if they haven't spoken to you in a minute: AA000303: {{\"delay\":60, \"message\":\"What are you doing?\"}}\n\n ===========================SENSORS===========================\n Sensors list:\n {\n str(\n [\n {\"sensor_name\": sensor[\"name\"], \"sensor_description\": sensor[\"annotation\"]}\n for sensor in ai_care.sensors.values()\n if sensor[\"name\"] not in inactive_sensors_set\n ]\n )\n }\n =============================================================\n \n ==========================DETECTORS==========================\n Detectors list:\n {\n str(\n [\n {\"detector_name\": detector.name, \"detector_description\": detector.annotation}\n for detector in ai_care.detectors.values()\n if detector.name not in inactive_detectors_set\n ]\n )\n }\n =============================================================\n \n ============================FACTS============================\n {intervals_info}\n \n {ai_care.guide}\n =============================================================\n \"\"\"\n )\n return prompt" } ]
import itertools import logging import time import threading from abc import ABCMeta, abstractmethod from typing import Callable, Any, Generator, TypedDict, Literal, cast from .abilities import Ability from .choice_execute import choice_execute from .parse_response import parse_response from .render_prompt import render_basic_prompt
4,682
from __future__ import annotations logger = logging.getLogger("ai_care") ChatContext = Any ConfigKey = Literal["delay", "ask_later_count_limit", "ask_depth", "n_chat_intervals"] class AICare: def __init__(self) -> None: self.timers: dict[int, AICareTimer] = {} self.detectors: dict[str, Detector] = {} self.sensors: dict[str, dict] = {}
from __future__ import annotations logger = logging.getLogger("ai_care") ChatContext = Any ConfigKey = Literal["delay", "ask_later_count_limit", "ask_depth", "n_chat_intervals"] class AICare: def __init__(self) -> None: self.timers: dict[int, AICareTimer] = {} self.detectors: dict[str, Detector] = {} self.sensors: dict[str, dict] = {}
self.ability: Ability = Ability(self)
0
2023-12-08 05:45:07+00:00
8k
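As a side note, the parse_response and render_basic_prompt snippets in the record above describe an "AA00XXXX:" reply convention (fixed "AA00", a two-character choice code, the code repeated, then a colon and the content). A toy check of that convention, not the library's own parser, assuming two-digit codes as in the prompt's examples:

import re

# Toy validator for the reply prefix described above, e.g. "AA000202:Hello!".
def split_reply(reply: str):
    m = re.match(r"AA00(\d\d)(\d\d):(.*)", reply, flags=re.DOTALL)
    if m is None or m.group(1) != m.group(2):
        return None  # malformed prefix, or the repeated code does not match
    return m.group(1), m.group(3)

print(split_reply("AA000202:Hello!"))  # ('02', 'Hello!')
print(split_reply("AA000203:broken"))  # None (codes disagree)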
neu-spiral/multi-label-emg
multi_label_emg/train.py
[ { "identifier": "load_data_dict", "path": "multi_label_emg/data.py", "snippet": "def load_data_dict():\n \"\"\"\n Loads features and labels from subject folders into a single dictionary as described below.\n NOTE - preprocessing should be been done first to extract features from raw data (see README).\n\n data_dict = {\n Subj0: {\n Calibration_features: ...,\n Calibration_dir_labels: ...,\n Calibration_mod_labels: ...,\n Calibration_visual_dir_labels: ...,\n Calibration_visual_mod_labels: ...,\n SimultaneousPulse1_NoFeedback_features: ...,\n ...\n },\n ...\n }\n \"\"\"\n\n blocks = [\"Calibration\"]\n for i in [1, 2, 3]:\n for feedback in [\"NoFeedBack\", \"WithFeedBack\"]:\n blocks.append(f\"SimultaneousPulse{i}_{feedback}\")\n blocks.append(f\"HoldPulse{i}_{feedback}\")\n\n results = {}\n for i in trange(11, desc=\"Load Subjects\", leave=True):\n results[f\"Subj{i}\"] = {}\n for block in tqdm(blocks, leave=False, position=1):\n path = DATASET_DIR / \"python\" / f\"Subj{i}\" / block\n # NOTE - features.npy is created during preprocessing script\n results[f\"Subj{i}\"][f\"{block}_features\"] = np.load(path / \"features.npy\")\n results[f\"Subj{i}\"][f\"{block}_dir_labels\"] = np.load(path / \"joystick_direction_labels.npy\")\n results[f\"Subj{i}\"][f\"{block}_mod_labels\"] = np.load(path / \"joystick_modifier_labels.npy\")\n results[f\"Subj{i}\"][f\"{block}_visual_dir_labels\"] = np.load(path / \"visual_direction_labels.npy\")\n results[f\"Subj{i}\"][f\"{block}_visual_mod_labels\"] = np.load(path / \"visual_modifier_labels.npy\")\n return results" }, { "identifier": "AvgPairs", "path": "multi_label_emg/models.py", "snippet": "class AvgPairs:\n \"\"\"Create fake doubles by averaging pairs of singles. New items have hard labels including both classes\"\"\"\n\n def __init__(self, n_per_class: int):\n self.n_per_class = n_per_class\n\n def __call__(self, x: np.ndarray, y_dir: np.ndarray, y_mod: np.ndarray):\n \"\"\"\n Args:\n x_single: (n_samples_in, n_features) - data/features from single gestures\n y_dir_single: (n_samples_in, DIR_PROBS_SHAPE) - one-hot labels of direction gestures\n y_mod_single: (n_samples_in, MOD_PROBS_SHAPE) - one-hot labels of modifier gestures\n\n Returns:\n x_prime: (n_samples_aug, n_features) - augmented data\n y_prime_dir: (n_samples_aug, len(DIRECTION_GESTURES)) - augmented labels\n y_prime_mod: (n_samples_aug, len(MODIFIER_GESTURES)) - augmented labels\n \"\"\"\n x_dir, x_mod, y_dir, y_mod = split_dir_mod(x, y_dir, y_mod)\n x_aug, y_dir_aug, y_mod_aug = [], [], []\n for (x1, y1), (x2, y2) in product(zip(x_dir, y_dir), zip(x_mod, y_mod)):\n x_aug.append((x1 + x2) / 2)\n y_dir_aug.append(y1)\n y_mod_aug.append(y2)\n x_aug = np.stack(x_aug)\n y_dir_aug = np.stack(y_dir_aug)\n y_mod_aug = np.stack(y_mod_aug)\n\n if self.n_per_class > 0:\n # For each combination class, truncate to self.n_per_class\n res_x, res_y_dir, res_y_mod = [], [], []\n for d in np.unique(y_dir_aug, axis=0):\n for m in np.unique(y_mod_aug, axis=0):\n idx = np.where(np.logical_and((y_dir_aug == d).all(-1), (y_mod_aug == m).all(-1)))[0]\n perm = np.random.permutation(len(idx))\n res_x.append(x_aug[idx[perm[: self.n_per_class]]])\n res_y_dir.append(y_dir_aug[idx[perm[: self.n_per_class]]])\n res_y_mod.append(y_mod_aug[idx[perm[: self.n_per_class]]])\n\n x_aug = np.concatenate(res_x)\n y_dir_aug = np.concatenate(res_y_dir)\n y_mod_aug = np.concatenate(res_y_mod)\n\n return x_aug, y_dir_aug, y_mod_aug\n\n def __repr__(self):\n return f\"{type(self).__name__}(n_per_class={self.n_per_class})\"" }, { 
"identifier": "ElementwiseMaxPairs", "path": "multi_label_emg/models.py", "snippet": "class ElementwiseMaxPairs:\n \"\"\"Create fake doubles by taking elementwise max of each feature.\n New items have hard labels including both classes\"\"\"\n\n def __init__(self, n_per_class: int):\n self.n_per_class = n_per_class\n\n def __call__(self, x: np.ndarray, y_dir: np.ndarray, y_mod: np.ndarray):\n \"\"\"\n Args:\n x_single: (n_samples_in, n_features) - data/features from single gestures\n y_dir_single: (n_samples_in, DIR_PROBS_SHAPE) - one-hot labels of direction gestures\n y_mod_single: (n_samples_in, MOD_PROBS_SHAPE) - one-hot labels of modifier gestures\n\n Returns:\n x_prime: (n_samples_aug, n_features) - augmented data\n y_prime_dir: (n_samples_aug, len(DIRECTION_GESTURES)) - augmented labels\n y_prime_mod: (n_samples_aug, len(MODIFIER_GESTURES)) - augmented labels\n \"\"\"\n x_dir, x_mod, y_dir, y_mod = split_dir_mod(x, y_dir, y_mod)\n x_aug, y_dir_aug, y_mod_aug = [], [], []\n for (x1, y1), (x2, y2) in product(zip(x_dir, y_dir), zip(x_mod, y_mod)):\n x_aug.append(np.maximum(x1, x2))\n y_dir_aug.append(y1)\n y_mod_aug.append(y2)\n x_aug = np.stack(x_aug)\n y_dir_aug = np.stack(y_dir_aug)\n y_mod_aug = np.stack(y_mod_aug)\n\n if self.n_per_class > 0:\n # For each combination class, truncate to self.n_per_class\n res_x, res_y_dir, res_y_mod = [], [], []\n for d in np.unique(y_dir_aug, axis=0):\n for m in np.unique(y_mod_aug, axis=0):\n idx = np.where(np.logical_and((y_dir_aug == d).all(-1), (y_mod_aug == m).all(-1)))[0]\n perm = np.random.permutation(len(idx))\n res_x.append(x_aug[idx[perm[: self.n_per_class]]])\n res_y_dir.append(y_dir_aug[idx[perm[: self.n_per_class]]])\n res_y_mod.append(y_mod_aug[idx[perm[: self.n_per_class]]])\n\n x_aug = np.concatenate(res_x)\n y_dir_aug = np.concatenate(res_y_dir)\n y_mod_aug = np.concatenate(res_y_mod)\n\n return x_aug, y_dir_aug, y_mod_aug\n\n def __repr__(self):\n return f\"{type(self).__name__}(n_per_class={self.n_per_class})\"" }, { "identifier": "ParallelA", "path": "multi_label_emg/models.py", "snippet": "class ParallelA(BaseParallelModel):\n DEFAULT_SAVE_NAME = \"ParallelA.pkl\"\n\n def __init__(\n self,\n dir_clf,\n mod_clf,\n use_augmentation: bool,\n n_aug_per_class: int = -1,\n include_rest_data_for_clf: bool = False,\n ):\n self.dir_clf = dir_clf\n self.mod_clf = mod_clf\n self.use_augmentation = use_augmentation\n self.n_aug_per_class = n_aug_per_class\n self._n_aug_created = None\n self.include_rest_data_for_clf = include_rest_data_for_clf\n\n def get_params(self, deep=True):\n return {\n \"dir_clf\": self.dir_clf,\n \"mod_clf\": self.mod_clf,\n \"use_augmentation\": self.use_augmentation,\n \"n_aug_per_class\": self.n_aug_per_class,\n \"include_rest_data_for_clf\": self.include_rest_data_for_clf,\n }\n\n def fit(self, features, y_dir, y_mod):\n if self.use_augmentation:\n aug = AvgPairs(self.n_aug_per_class)\n aug_features, aug_dir_labels, aug_mod_labels = aug(features, y_dir, y_mod)\n features = np.concatenate([features, aug_features])\n y_dir = np.concatenate([y_dir, aug_dir_labels])\n y_mod = np.concatenate([y_mod, aug_mod_labels])\n self._n_aug_created = len(aug_features)\n\n if y_dir.ndim == 2:\n y_dir = y_dir.argmax(-1)\n if y_mod.ndim == 2:\n y_mod = y_mod.argmax(-1)\n\n if self.include_rest_data_for_clf:\n # In this case, the label (NoDir, NoMod) could mean \"active and doesn't fit our classes\" or \"resting\"\n self.dir_clf.fit(features, y_dir)\n self.mod_clf.fit(features, y_mod)\n else:\n # In this case, the label (NoDir, 
NoMod) means \"active and doesn't fit classes\".\n # \"Rest\" data is out-of-domain\n active_idx = np.logical_or(y_dir != NO_DIR_IDX, y_mod != NO_MOD_IDX)\n active_features = features[active_idx]\n active_y_dir = y_dir[active_idx]\n active_y_mod = y_mod[active_idx]\n\n self.dir_clf.fit(active_features, active_y_dir)\n self.mod_clf.fit(active_features, active_y_mod)\n return self\n\n def predict_proba(self, features):\n \"\"\"Only for gestures\"\"\"\n dir_probs = self.dir_clf.predict_proba(features)\n mod_probs = self.mod_clf.predict_proba(features)\n return dir_probs, mod_probs\n\n def predict(self, features):\n \"\"\"features.shape == (n_channels, n_samples) or (n_trials, n_channels, n_samples)\"\"\"\n dir_probs = self.dir_clf.predict_proba(features)\n mod_probs = self.mod_clf.predict_proba(features)\n return dir_probs.argmax(-1), mod_probs.argmax(-1)\n\n def save(self, save_dir: Path) -> Path:\n assert save_dir.exists() and save_dir.is_dir()\n file_path = save_dir / self.DEFAULT_SAVE_NAME\n with open(file_path, \"wb\") as f:\n pickle.dump(self, f)\n return file_path\n\n @classmethod\n def load(cls, file_path: Path) -> \"ParallelA\":\n with open(file_path, \"rb\") as f:\n return pickle.load(f)\n\n def __repr__(self):\n return (\n f\"{type(self).__name__}(dir_clf={self.dir_clf}, \"\n f\"use_augmentation={self.use_augmentation}, \"\n f\"n_aug_per_class={self.n_aug_per_class}, \"\n + f\"mod_clf={self.mod_clf}, \"\n + f\"include_rest_data_for_clf={self.include_rest_data_for_clf})\"\n )" }, { "identifier": "ParallelB", "path": "multi_label_emg/models.py", "snippet": "class ParallelB(BaseParallelModel):\n DEFAULT_SAVE_NAME = \"ParallelB.pkl\"\n\n def __init__(\n self,\n dir_clf,\n mod_clf,\n has_dir_clf,\n has_mod_clf,\n use_augmentation: bool,\n n_aug_per_class: int = -1,\n ):\n self.has_dir_clf = has_dir_clf\n self.has_mod_clf = has_mod_clf\n self.dir_clf = dir_clf\n self.mod_clf = mod_clf\n self.use_augmentation = use_augmentation\n self.n_aug_per_class = n_aug_per_class\n self._n_aug_created = None\n\n def get_params(self, deep=True):\n return {\n \"dir_clf\": self.dir_clf,\n \"mod_clf\": self.mod_clf,\n \"has_dir_clf\": self.dir_clf,\n \"has_mod_clf\": self.mod_clf,\n \"use_augmentation\": self.use_augmentation,\n \"n_aug_per_class\": self.n_aug_per_class,\n }\n\n def fit(self, features, y_dir, y_mod):\n if self.use_augmentation:\n aug = AvgPairs(self.n_aug_per_class)\n aug_features, aug_dir_labels, aug_mod_labels = aug(features, y_dir, y_mod)\n features = np.concatenate([features, aug_features])\n y_dir = np.concatenate([y_dir, aug_dir_labels])\n y_mod = np.concatenate([y_mod, aug_mod_labels])\n self._n_aug_created = len(aug_features)\n\n if y_dir.ndim == 2:\n y_dir = y_dir.argmax(-1)\n if y_mod.ndim == 2:\n y_mod = y_mod.argmax(-1)\n has_direction = y_dir != NO_DIR_IDX\n has_modifier = y_mod != NO_MOD_IDX\n # Event check\n self.has_dir_clf.fit(features, has_direction.astype(int))\n self.has_mod_clf.fit(features, has_modifier.astype(int))\n # Direction and modifier\n self.dir_clf.fit(features[has_direction], y_dir[has_direction])\n self.mod_clf.fit(features[has_modifier], y_mod[has_modifier])\n return self\n\n def predict_proba(self, features):\n p_has_direction = self.has_dir_clf.predict_proba(features)\n p_has_modifier = self.has_mod_clf.predict_proba(features)\n\n p_dir_probs = self.dir_clf.predict_proba(features)\n p_mod_probs = self.mod_clf.predict_proba(features)\n\n # Check probs\n dir_probs = np.zeros((features.shape[0], 5))\n mod_probs = np.zeros((features.shape[0], 3))\n 
dir_probs[:, NO_DIR_IDX] = p_has_direction[:, 0] # p(no_direction | x)\n mod_probs[:, NO_MOD_IDX] = p_has_modifier[:, 0] # p(no_modifier | x)\n dir_probs[:, :NO_DIR_IDX] = np.multiply(\n p_dir_probs, p_has_direction[:, 1][..., None]\n ) # p(direction | has_direction)\n mod_probs[:, :NO_MOD_IDX] = np.multiply(\n p_mod_probs, p_has_modifier[:, 1][..., None]\n ) # p(modifier | has_modifier)\n assert np.allclose(dir_probs.sum(-1), 1) and np.allclose(mod_probs.sum(-1), 1), \"Probabilities should sum to 1\"\n # return probs\n \"\"\"Only for gestures\"\"\"\n return dir_probs, mod_probs\n\n def predict(self, features):\n dir_probs, mod_probs = self.predict_proba(features)\n return dir_probs.argmax(-1), mod_probs.argmax(-1)\n\n def save(self, save_dir: Path) -> Path:\n assert save_dir.exists() and save_dir.is_dir()\n file_path = save_dir / self.DEFAULT_SAVE_NAME\n with open(file_path, \"wb\") as f:\n pickle.dump(self, f)\n return file_path\n\n @classmethod\n def load(cls, file_path: Path) -> \"ParallelB\":\n with open(file_path, \"rb\") as f:\n return pickle.load(f)\n\n def __repr__(self):\n return (\n f\"{type(self).__name__}(has_dir_clf={self.has_dir_clf}, \"\n f\"dir_clf={self.dir_clf}, \"\n f\"use_augmentation={self.use_augmentation}, \"\n f\"n_aug_per_class={self.n_aug_per_class}, \"\n f\"has_mod_clf={self.has_mod_clf}),\"\n f\"mod_clf={self.mod_clf})\"\n )" }, { "identifier": "NO_DIR_IDX", "path": "multi_label_emg/utils.py", "snippet": "NO_DIR_IDX = len(DIRECTION_GESTURES) # When predicting direction, we have an extra class representing \"None\"" }, { "identifier": "NO_MOD_IDX", "path": "multi_label_emg/utils.py", "snippet": "NO_MOD_IDX = len(MODIFIER_GESTURES)" }, { "identifier": "RESULTS_DIR", "path": "multi_label_emg/utils.py", "snippet": "RESULTS_DIR = PROJECT_ROOT.parent / \"results\" # For experiment outputs and figures" }, { "identifier": "canonical_coords", "path": "multi_label_emg/utils.py", "snippet": "def canonical_coords():\n \"\"\"NOTE - order does not matter: (Up, Pinch) and (Pinch, Up) are both labeled as (Up, Pinch)\n Make a list table so we can convert:\n from integer labels such as (0, 1),\n to an index in confusion matrix and a string label\"\"\"\n result_int = []\n result_str = []\n\n # Add (<DIR>, NoMod) items\n for i, d in enumerate(DIRECTION_GESTURES):\n result_int.append((i, NO_MOD_IDX))\n result_str.append(f\"({d}, NoMod)\")\n\n # Add (NoDir, <MOD>) items\n for i, m in enumerate(MODIFIER_GESTURES):\n result_int.append((NO_DIR_IDX, i))\n result_str.append(f\"(NoDir, {m})\")\n\n # Add (<DIR>, <MOD>) items\n for i, d in enumerate(DIRECTION_GESTURES):\n for j, m in enumerate(MODIFIER_GESTURES):\n result_int.append((i, j))\n result_str.append(f\"({d}, {m})\")\n\n # Add the (NoDir, NoMod) item\n result_int.append((NO_DIR_IDX, NO_MOD_IDX))\n result_str.append(\"(NoDir, NoMod)\")\n\n return result_int, result_str" }, { "identifier": "confusion_matrix", "path": "multi_label_emg/utils.py", "snippet": "def confusion_matrix(y_true_2d, y_pred_2d, normalize_rows=True):\n \"\"\"\n Number of classes = 4 direction + 2 modifier + 4*2 combinations + (NoDir, NoMod) = 15\n Create a confusion matrix of shape (15, 15), arranged according to the canonical\n coordinates above\n\n NOTE - result may contain nans - use nanmean later\n \"\"\"\n coords, coords_str = canonical_coords()\n\n cm = np.zeros((len(coords), len(coords)), dtype=int)\n for yt, yp in zip(y_true_2d, y_pred_2d):\n cm[coords.index(tuple(yt)), coords.index(tuple(yp))] += 1\n if normalize_rows:\n cm = cm.astype(float)\n with 
np.errstate(all=\"ignore\"): # Ignore division by zero for empty rows\n cm /= cm.sum(axis=-1, keepdims=True)\n return cm" }, { "identifier": "str2bool", "path": "multi_label_emg/utils.py", "snippet": "def str2bool(s):\n if s.lower() in (\"yes\", \"true\", \"t\", \"y\", \"1\"):\n return True\n elif s.lower() in (\"no\", \"false\", \"f\", \"n\", \"0\"):\n return False\n else:\n raise ValueError(\"Boolean value expected.\")" } ]
import sys import numpy as np import plotly.graph_objects as go import argparse from loguru import logger from sklearn.discriminant_analysis import LinearDiscriminantAnalysis from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier from sklearn.linear_model import LogisticRegression from sklearn.mixture import GaussianMixture from sklearn.neighbors import KernelDensity, KNeighborsClassifier from sklearn.neural_network import MLPClassifier from sklearn.pipeline import make_pipeline from sklearn.preprocessing import RobustScaler from sklearn.svm import SVC from multi_label_emg.data import load_data_dict from multi_label_emg.models import AvgPairs, ElementwiseMaxPairs, ParallelA, ParallelB from multi_label_emg.utils import ( NO_DIR_IDX, NO_MOD_IDX, RESULTS_DIR, canonical_coords, confusion_matrix, str2bool, )
6,869
k_smallest_idx = np.argpartition(dists, n_per_class)[:n_per_class] subset_idx = idx[k_smallest_idx] res_x.append(features_aug[subset_idx]) res_y_dir.append(dir_labels_aug[subset_idx]) res_y_mod.append(mod_labels_aug[subset_idx]) features_aug = np.concatenate(res_x) dir_labels_aug = np.concatenate(res_y_dir) mod_labels_aug = np.concatenate(res_y_mod) return features_aug, dir_labels_aug, mod_labels_aug def subset_doubles_spaced_quantiles( n_per_class: int, features_aug: np.ndarray, dir_labels_aug: np.ndarray, mod_labels_aug: np.ndarray ): """For each class, rank items by their distance to the class mean, and take items with ranks 1, K+1, 2K+1. The spacing K will be approx (class_size / n_per_class) """ # Find class means class_means = {} labels_2d = np.stack([dir_labels_aug.argmax(-1), mod_labels_aug.argmax(-1)], axis=-1) for d, m in np.unique(labels_2d, axis=0): idx = np.where((labels_2d == (d, m)).all(-1))[0] class_means[(d, m)] = np.mean(features_aug[idx], axis=0) # Subset each class by taking items closest to mean res_x, res_y_dir, res_y_mod = [], [], [] for d, m in np.unique(labels_2d, axis=0): class_mean = class_means[(d, m)] idx = np.where((labels_2d == (d, m)).all(-1))[0] dists = np.linalg.norm(features_aug[idx] - class_mean, axis=-1) ranked_distances = np.argsort(dists) spacing = int(np.floor(len(idx) / n_per_class)) # Since we use floor, we step slightly too little. # In case this gives us extra items, we also truncate. subset_idx = idx[ranked_distances[::spacing][:n_per_class]] n_subset = len(subset_idx) assert abs(n_subset - n_per_class) <= 1 res_x.append(features_aug[subset_idx]) res_y_dir.append(dir_labels_aug[subset_idx]) res_y_mod.append(mod_labels_aug[subset_idx]) features_aug = np.concatenate(res_x) dir_labels_aug = np.concatenate(res_y_dir) mod_labels_aug = np.concatenate(res_y_mod) return features_aug, dir_labels_aug, mod_labels_aug def subset_dir_mod( method: str, fraction_doubles_per_class: float, features: np.ndarray, dir_labels: np.ndarray, mod_labels: np.ndarray ): # Should have 1-hot vector labels assert dir_labels.ndim == 2 assert mod_labels.ndim == 2 # check these are all singles items_with_dir = dir_labels.argmax(-1) != NO_DIR_IDX items_with_mod = mod_labels.argmax(-1) != NO_MOD_IDX items_with_both = np.logical_and(items_with_dir, items_with_mod) assert np.sum(items_with_both) == 0 labels_2d = np.stack([dir_labels.argmax(-1), mod_labels.argmax(-1)], axis=-1) # Figure out how many items we have per class # Then use fraction_doubles_per_class to figure out how many doubles we want class_sizes = np.unique(labels_2d, axis=0, return_counts=True)[-1] n_per_class = int(np.round(fraction_doubles_per_class * np.mean(class_sizes))) n_per_class = min(n_per_class, np.min(class_sizes)) logger.info(f"Initial class sizes: {class_sizes}, n_per_class: {n_per_class}") # For each class, fit a multivariate gaussian and sample the requested number of points res_x, res_y_dir, res_y_mod = [], [], [] for d, m in np.unique(labels_2d, axis=0): idx = np.where((labels_2d == (d, m)).all(-1))[0] class_mean = np.mean(features[idx], axis=0) if method == "subsetInput_uniform": subset_idx = np.random.choice(idx, n_per_class, replace=False) elif method == "subsetInput_near_mean": dists = np.linalg.norm(features[idx] - class_mean, axis=-1) ranked_distances = np.argsort(dists) subset_idx = idx[ranked_distances[:n_per_class]] elif method == "subsetInput_spaced_quantiles": dists = np.linalg.norm(features[idx] - class_mean, axis=-1) ranked_distances = np.argsort(dists) spacing = int(np.floor(len(idx) / 
n_per_class)) # Since we use floor, we step slightly too little. # In case this gives us extra items, we also truncate. subset_idx = idx[ranked_distances[::spacing][:n_per_class]] n_subset = len(subset_idx) assert abs(n_subset - n_per_class) <= 1 res_x.append(features[subset_idx]) res_y_dir.append(dir_labels[subset_idx]) res_y_mod.append(mod_labels[subset_idx]) res_x = np.concatenate(res_x) res_y_dir = np.concatenate(res_y_dir) res_y_mod = np.concatenate(res_y_mod) labels_2d = np.stack([res_y_dir.argmax(-1), res_y_mod.argmax(-1)], axis=-1) class_sizes = np.unique(labels_2d, axis=0, return_counts=True)[-1] logger.info(f"Class sizes after subset: {class_sizes}") return res_x, res_y_dir, res_y_mod def get_augmented_doubles( method: str, feature_combine_type: str, fraction_doubles_per_class: float, features: np.ndarray, dir_labels: np.ndarray, mod_labels: np.ndarray, ): if feature_combine_type == "avg":
def get_name( subject: str, seed: int, parallel_model_type: str, clf_name: str, doubles_method: str, fraction_doubles_per_class: float, singles_method: str, rel_fraction_singles_per_class: float, include_doubles_in_train: bool, feature_combine_type: str, ): return "__".join( [ f"subj={subject}", f"seed={seed}", f"par={parallel_model_type}", f"clf={clf_name}", f"doubles={doubles_method}", f"frac_doubles={fraction_doubles_per_class}", f"singles={singles_method}", f"frac_singles={rel_fraction_singles_per_class}", f"incl_doubles={include_doubles_in_train}", f"feat_type={feature_combine_type}", ] ) def plot_confusion_matrix(data: np.ndarray): def make_text(cm): text = [] for v in cm.flatten(): text.append(f"{round(v, 2)}") return np.array(text).reshape(cm.shape) coords, coords_str = canonical_coords() text = make_text(data) fig = go.Figure() fig.update_layout( # margin=margin, xaxis=dict( title="Predicted", tickangle=-45, tickmode="array", ticktext=coords_str, tickvals=list(range(len(coords_str))), constrain="domain", ), yaxis=dict( title="Actual", tickmode="array", ticktext=coords_str, tickvals=list(range(len(coords_str))), autorange="reversed", scaleanchor="x", scaleratio=1, constrain="domain", ), ) fig.add_trace( go.Heatmap(z=data, text=text, texttemplate="%{text}", zmin=0, zmax=1, colorscale="Blues", showscale=False) ) return fig def subset_doubles_uniform( n_per_class: int, features_aug: np.ndarray, dir_labels_aug: np.ndarray, mod_labels_aug: np.ndarray ): """For each class, take n_per_class items uniformly at random""" res_x, res_y_dir, res_y_mod = [], [], [] labels_2d = np.stack([dir_labels_aug.argmax(-1), mod_labels_aug.argmax(-1)], axis=-1) for d, m in np.unique(labels_2d, axis=0): idx = np.where((labels_2d == (d, m)).all(-1))[0] subset_idx = np.random.choice(idx, size=n_per_class, replace=False) res_x.append(features_aug[subset_idx]) res_y_dir.append(dir_labels_aug[subset_idx]) res_y_mod.append(mod_labels_aug[subset_idx]) features_aug = np.concatenate(res_x) dir_labels_aug = np.concatenate(res_y_dir) mod_labels_aug = np.concatenate(res_y_mod) return features_aug, dir_labels_aug, mod_labels_aug def subset_doubles_near_mean( n_per_class: int, features_aug: np.ndarray, dir_labels_aug: np.ndarray, mod_labels_aug: np.ndarray ): """For each class, take n_per_class items closest to the mean of these synthetic items""" # Find class means class_means = {} labels_2d = np.stack([dir_labels_aug.argmax(-1), mod_labels_aug.argmax(-1)], axis=-1) for d, m in np.unique(labels_2d, axis=0): idx = np.where((labels_2d == (d, m)).all(-1))[0] class_means[(d, m)] = np.mean(features_aug[idx], axis=0) # Subset each class by taking items closest to mean res_x, res_y_dir, res_y_mod = [], [], [] for d, m in np.unique(labels_2d, axis=0): class_mean = class_means[(d, m)] idx = np.where((labels_2d == (d, m)).all(-1))[0] dists = np.linalg.norm(features_aug[idx] - class_mean, axis=-1) k_smallest_idx = np.argpartition(dists, n_per_class)[:n_per_class] subset_idx = idx[k_smallest_idx] res_x.append(features_aug[subset_idx]) res_y_dir.append(dir_labels_aug[subset_idx]) res_y_mod.append(mod_labels_aug[subset_idx]) features_aug = np.concatenate(res_x) dir_labels_aug = np.concatenate(res_y_dir) mod_labels_aug = np.concatenate(res_y_mod) return features_aug, dir_labels_aug, mod_labels_aug def subset_doubles_spaced_quantiles( n_per_class: int, features_aug: np.ndarray, dir_labels_aug: np.ndarray, mod_labels_aug: np.ndarray ): """For each class, rank items by their distance to the class mean, and take items with ranks 1, K+1, 
2K+1. The spacing K will be approx (class_size / n_per_class) """ # Find class means class_means = {} labels_2d = np.stack([dir_labels_aug.argmax(-1), mod_labels_aug.argmax(-1)], axis=-1) for d, m in np.unique(labels_2d, axis=0): idx = np.where((labels_2d == (d, m)).all(-1))[0] class_means[(d, m)] = np.mean(features_aug[idx], axis=0) # Subset each class by taking items closest to mean res_x, res_y_dir, res_y_mod = [], [], [] for d, m in np.unique(labels_2d, axis=0): class_mean = class_means[(d, m)] idx = np.where((labels_2d == (d, m)).all(-1))[0] dists = np.linalg.norm(features_aug[idx] - class_mean, axis=-1) ranked_distances = np.argsort(dists) spacing = int(np.floor(len(idx) / n_per_class)) # Since we use floor, we step slightly too little. # In case this gives us extra items, we also truncate. subset_idx = idx[ranked_distances[::spacing][:n_per_class]] n_subset = len(subset_idx) assert abs(n_subset - n_per_class) <= 1 res_x.append(features_aug[subset_idx]) res_y_dir.append(dir_labels_aug[subset_idx]) res_y_mod.append(mod_labels_aug[subset_idx]) features_aug = np.concatenate(res_x) dir_labels_aug = np.concatenate(res_y_dir) mod_labels_aug = np.concatenate(res_y_mod) return features_aug, dir_labels_aug, mod_labels_aug def subset_dir_mod( method: str, fraction_doubles_per_class: float, features: np.ndarray, dir_labels: np.ndarray, mod_labels: np.ndarray ): # Should have 1-hot vector labels assert dir_labels.ndim == 2 assert mod_labels.ndim == 2 # check these are all singles items_with_dir = dir_labels.argmax(-1) != NO_DIR_IDX items_with_mod = mod_labels.argmax(-1) != NO_MOD_IDX items_with_both = np.logical_and(items_with_dir, items_with_mod) assert np.sum(items_with_both) == 0 labels_2d = np.stack([dir_labels.argmax(-1), mod_labels.argmax(-1)], axis=-1) # Figure out how many items we have per class # Then use fraction_doubles_per_class to figure out how many doubles we want class_sizes = np.unique(labels_2d, axis=0, return_counts=True)[-1] n_per_class = int(np.round(fraction_doubles_per_class * np.mean(class_sizes))) n_per_class = min(n_per_class, np.min(class_sizes)) logger.info(f"Initial class sizes: {class_sizes}, n_per_class: {n_per_class}") # For each class, fit a multivariate gaussian and sample the requested number of points res_x, res_y_dir, res_y_mod = [], [], [] for d, m in np.unique(labels_2d, axis=0): idx = np.where((labels_2d == (d, m)).all(-1))[0] class_mean = np.mean(features[idx], axis=0) if method == "subsetInput_uniform": subset_idx = np.random.choice(idx, n_per_class, replace=False) elif method == "subsetInput_near_mean": dists = np.linalg.norm(features[idx] - class_mean, axis=-1) ranked_distances = np.argsort(dists) subset_idx = idx[ranked_distances[:n_per_class]] elif method == "subsetInput_spaced_quantiles": dists = np.linalg.norm(features[idx] - class_mean, axis=-1) ranked_distances = np.argsort(dists) spacing = int(np.floor(len(idx) / n_per_class)) # Since we use floor, we step slightly too little. # In case this gives us extra items, we also truncate. 
subset_idx = idx[ranked_distances[::spacing][:n_per_class]] n_subset = len(subset_idx) assert abs(n_subset - n_per_class) <= 1 res_x.append(features[subset_idx]) res_y_dir.append(dir_labels[subset_idx]) res_y_mod.append(mod_labels[subset_idx]) res_x = np.concatenate(res_x) res_y_dir = np.concatenate(res_y_dir) res_y_mod = np.concatenate(res_y_mod) labels_2d = np.stack([res_y_dir.argmax(-1), res_y_mod.argmax(-1)], axis=-1) class_sizes = np.unique(labels_2d, axis=0, return_counts=True)[-1] logger.info(f"Class sizes after subset: {class_sizes}") return res_x, res_y_dir, res_y_mod def get_augmented_doubles( method: str, feature_combine_type: str, fraction_doubles_per_class: float, features: np.ndarray, dir_labels: np.ndarray, mod_labels: np.ndarray, ): if feature_combine_type == "avg":
aug = AvgPairs(-1)
1
2023-12-12 16:50:34+00:00
8k
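The AvgPairs snippet in the record above builds synthetic "double" gestures by averaging every direction-only sample with every modifier-only sample, keeping both hard labels. A toy numpy illustration of that pairing (made-up feature values, not the repo's class):

import numpy as np
from itertools import product

# Made-up feature rows standing in for single-gesture examples.
x_dir = np.array([[1.0, 0.0, 0.0], [0.8, 0.2, 0.0]])  # direction-only gestures
x_mod = np.array([[0.0, 0.0, 1.0]])                    # modifier-only gesture

# Each (direction, modifier) pair is averaged elementwise, yielding one
# synthetic "double" per pair; in the repo each result keeps both labels.
x_aug = np.stack([(xd + xm) / 2.0 for xd, xm in product(x_dir, x_mod)])
print(x_aug.shape)  # (2, 3): two synthetic doubles from 2 x 1 single gestures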
ebb-earl-co/tidal-wave
tidal_wave/track.py
[ { "identifier": "manifester", "path": "tidal_wave/dash.py", "snippet": "class TidalManifestException(Exception):\nclass S:\nclass SegmentTimeline:\nclass JSONDASHManifest:\nclass XMLDASHManifest:\n def __post_init__(self):\n def __post_init__(self):\n def build_urls(self, session: Session) -> Optional[List[str]]:\n def sub_number(n: int, p: str = r\"\\$Number\\$\", s: str = self.media) -> str:\ndef manifester(tesrj: TracksEndpointStreamResponseJSON) -> Manifest:" }, { "identifier": "af_aq", "path": "tidal_wave/media.py", "snippet": "class AudioFormat(str, Enum):\nclass VideoFormat(str, Enum):\nTAG_MAPPING: Dict[str, Dict[str, str]] = {\n \"album\": {\"flac\": \"ALBUM\", \"m4a\": \"\\xa9alb\"},\n \"album_artist\": {\"flac\": \"ALBUMARTIST\", \"m4a\": \"aART\"},\n \"artist\": {\"flac\": \"ARTIST\", \"m4a\": \"\\xa9ART\"},\n \"artists\": {\"flac\": \"ARTISTS\", \"m4a\": \"----:com.apple.iTunes:ARTISTS\"},\n \"barcode\": {\"flac\": \"BARCODE\", \"m4a\": \"----:com.apple.iTunes:BARCODE\"},\n \"comment\": {\"flac\": \"COMMENT\", \"m4a\": \"\\xa9cmt\"},\n \"composer\": {\"flac\": \"COMPOSER\", \"m4a\": \"\\xa9wrt\"},\n \"copyright\": {\"flac\": \"COPYRIGHT\", \"m4a\": \"cprt\"},\n \"date\": {\"flac\": \"DATE\", \"m4a\": \"\\xa9day\"},\n \"director\": {\"flac\": None, \"m4a\": \"\\xa9dir\"},\n \"engineer\": {\"flac\": \"ENGINEER\", \"m4a\": \"----:com.apple.iTunes:ENGINEER\"},\n \"isrc\": {\"flac\": \"ISRC\", \"m4a\": \"----:com.apple.iTunes:ISRC\"},\n \"lyrics\": {\"flac\": \"LYRICS\", \"m4a\": \"\\xa9lyr\"},\n \"lyricist\": {\"flac\": \"LYRICIST\", \"m4a\": \"----:com.apple.iTunes:LYRICIST\"},\n \"mixer\": {\"flac\": \"MIXER\", \"m4a\": \"----:com.apple.iTunes:MIXER\"},\n \"producer\": {\"flac\": \"PRODUCER\", \"m4a\": \"----:com.apple.iTunes:PRODUCER\"},\n \"remixer\": {\"flac\": \"REMIXER\", \"m4a\": \"----:com.apple.iTunes:REMIXER\"},\n \"album_peak_amplitude\": {\n \"flac\": \"REPLAYGAIN_ALBUM_PEAK\",\n \"m4a\": \"----:com.apple.iTunes:REPLAYGAIN_ALBUM_PEAK\",\n },\n \"album_replay_gain\": {\n \"flac\": \"REPLAYGAIN_ALBUM_GAIN\",\n \"m4a\": \"----:com.apple.iTunes:REPLAYGAIN_ALBUM_GAIN\",\n },\n \"track_peak_amplitude\": {\n \"flac\": \"REPLAYGAIN_TRACK_PEAK\",\n \"m4a\": \"----:com.apple.iTunes:REPLAYGAIN_TRACK_PEAK\",\n },\n \"track_replay_gain\": {\n \"flac\": \"REPLAYGAIN_TRACK_GAIN\",\n \"m4a\": \"----:com.apple.iTunes:REPLAYGAIN_TRACK_GAIN\",\n },\n \"title\": {\"flac\": \"TITLE\", \"m4a\": \"\\xa9nam\"},\n}" }, { "identifier": "AlbumsEndpointResponseJSON", "path": "tidal_wave/models.py", "snippet": "class AlbumsEndpointResponseJSON(dataclass_wizard.JSONWizard):\n \"\"\"This dataclass is the `dataclass-wizard`-generated class that represents\n the JSON response from https://api.tidal.com/v1/albums/<ALBUMID>\"\"\"\n\n id: int = field(repr=False)\n title: str\n duration: int\n number_of_tracks: int\n number_of_volumes: int = field(repr=False)\n release_date: date\n copyright: str = field(repr=False)\n type: str\n version: Optional[str]\n url: str\n cover: str = field(repr=False)\n explicit: bool\n upc: Union[int, str]\n audio_quality: str\n audio_modes: List[str]\n media_metadata: \"MediaMetadata\" = field(repr=False)\n artist: \"Artist\" = field(repr=False)\n artists: List[\"Artist\"]\n\n def __post_init__(self):\n self.cover_url: str = IMAGE_URL % f\"{self.cover.replace('-', '/')}/1280x1280\"\n self.name: str = (\n self.title.replace(\"/\", \"_\")\n .replace(\"|\", \"_\")\n .replace(\":\", \" -\")\n .replace('\"', \"\")\n )" }, { "identifier": "ArtistsBioResponseJSON", "path": 
"tidal_wave/models.py", "snippet": "class ArtistsBioResponseJSON(dataclass_wizard.JSONWizard):\n \"\"\"The response from the TIDAL API endpoint /artists/<ID>/bio\n is modeled by this class.\"\"\"\n\n source: str\n last_updated: Annotated[\n datetime, dataclass_wizard.Pattern(\"%Y-%m-%dT%H:%M:%S.%f%z\")\n ]\n text: str = field(repr=None)\n summary: str = field(repr=None)" }, { "identifier": "TracksCreditsResponseJSON", "path": "tidal_wave/models.py", "snippet": "class TracksCreditsResponseJSON(dataclass_wizard.JSONWizard):\n \"\"\"The response from the TIDAL API endpoint /tracks/<ID>/credits\n is modeled by this class.\"\"\"\n\n credits: List[\"Credit\"]\n\n def get_credit(self, type_: str) -> Optional[\"Credit\"]:\n \"\"\"Given a contributor type (e.g. Lyricist, Composer),\n go through the `credits` attribute, returning the `Credit` object\n for the given contributor type if it exists\"\"\"\n _credit = None\n try:\n _credit = next(c for c in self.credits if c.type == type_)\n except StopIteration:\n logger.debug(f\"There are no credits of type {type_} for this track\")\n finally:\n return _credit\n\n def get_contributors(self, type_: str) -> Optional[Tuple[str]]:\n \"\"\"Given a contributor type (e.g. Lyricist, Composer),\n go through the `credits` attribute: for each Credit\n object in `self.credits`, if there is a Credit with\n `type` attribute matching `type_` argument, then return\n the `name` attribute for each Contributor object in\n `Credit.contributors`\"\"\"\n _credit: Optional[\"Credit\"] = self.get_credit(type_)\n if _credit is not None:\n return tuple(c.name for c in _credit.contributors)\n else:\n return\n\n def __post_init__(self):\n \"\"\"Try to parse the various Contributors to top-level\n attributes of this class\"\"\"\n self.composer: Optional[Tuple[str]] = self.get_contributors(\"Composer\")\n self.engineer: Optional[Tuple[str]] = self.get_contributors(\"Engineer\")\n self.lyricist: Optional[Tuple[str]] = self.get_contributors(\"Lyricist\")\n self.mixer: Optional[Tuple[str]] = self.get_contributors(\"Mix Engineer\")\n self.producer: Optional[Tuple[str]] = self.get_contributors(\"Producer\")\n self.remixer: Optional[Tuple[str]] = self.get_contributors(\"Remixer\")\n self.piano: Optional[Tuple[str]] = self.get_contributors(\"Piano\")" }, { "identifier": "TracksEndpointResponseJSON", "path": "tidal_wave/models.py", "snippet": "class TracksEndpointResponseJSON(dataclass_wizard.JSONWizard):\n \"\"\"Response from the TIDAL API, tracks/{TRACKID} endpoint.If the params and\n headers are correctly specified, the API returns metadata of the available\n version of the audio track, including audio quality, track title, ISRC,\n track artists, album, track number, duration, etc.\"\"\"\n\n id: int = field(repr=False)\n title: str\n duration: int # seconds\n replay_gain: float = field(repr=False)\n peak: float = field(repr=False)\n track_number: int\n volume_number: int\n version: Optional[str]\n copyright: str = field(repr=False)\n url: str\n isrc: str = field(repr=False)\n explicit: bool\n audio_quality: str = field(repr=False)\n audio_modes: List[str] = field(repr=False)\n media_metadata: \"MediaMetadata\"\n artist: \"Artist\"\n artists: List[\"Artist\"]\n album: \"TrackAlbum\"\n\n def __post_init__(self):\n name: str = (\n self.title.replace(\"/\", \"_\")\n .replace(\"|\", \"_\")\n .replace(\":\", \" -\")\n .replace('\"', \"\")\n )\n self.name: str = name if self.version is None else f\"{name} ({self.version})\"" }, { "identifier": "TracksEndpointStreamResponseJSON", "path": 
"tidal_wave/models.py", "snippet": "class TracksEndpointStreamResponseJSON(dataclass_wizard.JSONWizard):\n \"\"\"Response from the TIDAL API's tracks/{TRACKID} stream\n endpoint. The params and headers, if correctly specified, return the\n manifest of the audio to be streamed. The manifest is a base64-encoded\n XML document or JSON object\"\"\"\n\n track_id: int\n audio_mode: AudioModeType\n audio_quality: AudioQualityType\n manifest: str = field(repr=False)\n manifest_mime_type: str = field(repr=False)\n album_replay_gain: Optional[float] = field(repr=False, default=None)\n album_peak_amplitude: Optional[float] = field(repr=False, default=None)\n track_replay_gain: Optional[float] = field(repr=False, default=None)\n track_peak_amplitude: Optional[float] = field(repr=False, default=None)\n bit_depth: Optional[int] = field(default=None)\n sample_rate: Optional[int] = field(default=None)\n\n def __post_init__(self):\n self.manifest_bytes: bytes = base64.b64decode(self.manifest)" }, { "identifier": "TracksLyricsResponseJSON", "path": "tidal_wave/models.py", "snippet": "class TracksLyricsResponseJSON(dataclass_wizard.JSONWizard):\n \"\"\"The response from the TIDAL API endpoint /tracks/<ID>/lyrics\n is modeled by this class.\"\"\"\n\n track_id: int\n lyrics_provider: str\n provider_commontrack_id: str\n provider_lyrics_id: str\n lyrics: str\n subtitles: str\n is_right_to_left: bool" }, { "identifier": "fetch_content_length", "path": "tidal_wave/requesting.py", "snippet": "def fetch_content_length(session: Session, url: str) -> int:\n \"\"\"Attempt to get the amount of bytes pointed to by `url`. If\n the HEAD request from the requests.Session object, `session`,\n encounters an HTTP request; or if the server does not support\n HTTP range requests; or if the server does not response with a\n Content-Length header, return 0\"\"\"\n session_params: dict = session.params\n # Unset params to avoid 403 response\n _params: dict = {k: None for k in session_params}\n with session.head(url=url, params=_params) as resp:\n if not resp.ok:\n cl: str = \"0\"\n else:\n cl: str = resp.headers.get(\"Content-Length\", \"0\")\n return int(cl)" }, { "identifier": "http_request_range_headers", "path": "tidal_wave/requesting.py", "snippet": "def http_request_range_headers(\n content_length: int, range_size: int, return_tuple: bool = True\n) -> Iterable[str]:\n \"\"\"This function creates HTTP request Range headers. Its iterable\n returned is of tuples; each tuple describes the (inclusive) boundaries\n of a bytes range with size range_size. If return_tuple is False, it returns\n a generator of tuples. 
E.g.\n ```>>> http_request_range_headers(16, 3)\n ('bytes=0-2',\n 'bytes=3-5',\n 'bytes=6-8',\n 'bytes=9-11',\n 'bytes=12-14',\n 'bytes=15-16')\n ```\n \"\"\"\n ranges: Iterator[Tuple[int, int]] = contiguous_ranges(content_length, range_size)\n iterable: Iterable = (f\"bytes={t[0]}-{t[1]}\" for t in ranges)\n if return_tuple:\n return tuple(iterable)\n else:\n return iterable" }, { "identifier": "request_albums", "path": "tidal_wave/requesting.py", "snippet": "def request_albums(\n session: Session, identifier: int\n) -> Optional[AlbumsEndpointResponseJSON]:\n return requester_maker(\n session=session,\n endpoint=\"albums\",\n identifier=identifier,\n headers={\"Accept\": \"application/json\"},\n subclass=AlbumsEndpointResponseJSON,\n )" }, { "identifier": "request_artist_bio", "path": "tidal_wave/requesting.py", "snippet": "def request_artist_bio(\n session: Session, identifier: int\n) -> Optional[ArtistsBioResponseJSON]:\n return requester_maker(\n session=session,\n endpoint=\"artists\",\n identifier=identifier,\n headers={\"Accept\": \"application/json\"},\n url_end=\"/bio\",\n subclass=ArtistsBioResponseJSON,\n )" }, { "identifier": "request_credits", "path": "tidal_wave/requesting.py", "snippet": "def request_credits(\n session: Session, identifier: int\n) -> Optional[TracksCreditsResponseJSON]:\n return requester_maker(\n session=session,\n endpoint=\"tracks\",\n identifier=identifier,\n headers={\"Accept\": \"application/json\"},\n parameters={\"includeContributors\": True},\n url_end=\"/credits\",\n subclass=TracksCreditsResponseJSON,\n credits_flag=True,\n )" }, { "identifier": "request_lyrics", "path": "tidal_wave/requesting.py", "snippet": "def request_lyrics(\n session: Session, identifier: int\n) -> Optional[TracksLyricsResponseJSON]:\n return requester_maker(\n session=session,\n endpoint=\"tracks\",\n identifier=identifier,\n headers={\"Accept\": \"application/json\"},\n url_end=\"/lyrics\",\n subclass=TracksLyricsResponseJSON,\n )" }, { "identifier": "request_stream", "path": "tidal_wave/requesting.py", "snippet": "def request_stream(\n session: Session, track_id: int, audio_quality: str\n) -> Optional[TracksEndpointStreamResponseJSON]:\n func = partial(\n requester_maker,\n session=session,\n endpoint=\"tracks\",\n identifier=track_id,\n headers={\"Accept\": \"application/json\"},\n parameters={\n \"audioquality\": audio_quality,\n \"playbackmode\": \"STREAM\",\n \"assetpresentation\": \"FULL\",\n },\n url_end=\"/playbackinfopostpaywall\",\n subclass=TracksEndpointStreamResponseJSON,\n )\n return func()" }, { "identifier": "request_tracks", "path": "tidal_wave/requesting.py", "snippet": "def request_tracks(\n session: Session, identifier: int\n) -> Optional[TracksEndpointResponseJSON]:\n return requester_maker(\n session=session,\n endpoint=\"tracks\",\n identifier=identifier,\n headers={\"Accept\": \"application/json\"},\n subclass=TracksEndpointResponseJSON,\n )" }, { "identifier": "download_artist_image", "path": "tidal_wave/utils.py", "snippet": "def download_artist_image(\n session: Session, artist: Artist, output_dir: Path, dimension: int = 320\n) -> Optional[Path]:\n \"\"\"Given a UUID that corresponds to a (JPEG) image on Tidal's servers,\n download the image file and write it as '{artist name}.jpeg'\n in the directory `output_dir`. 
Returns path to downloaded file\"\"\"\n _url: str = artist.picture_url(dimension)\n if _url is None:\n logger.info(\n f\"Cannot download image for artist '{artist}', \"\n \"as Tidal supplied no URL for this artist's image.\"\n )\n return\n\n with session.get(url=_url, headers={\"Accept\": \"image/jpeg\"}) as r:\n if not r.ok:\n logger.warning(\n \"Could not retrieve data from Tidal resources/images URL \"\n f\"for artist {artist} due to error code: {r.status_code}\"\n )\n logger.debug(r.reason)\n return\n else:\n bytes_to_write = BytesIO(r.content)\n\n file_name: str = f\"{artist.name.replace('..', '')}.jpg\"\n if bytes_to_write is not None:\n output_file: Path = output_dir / file_name\n bytes_to_write.seek(0)\n output_file.write_bytes(bytes_to_write.read())\n bytes_to_write.close()\n logger.info(\n f\"Wrote artist image JPEG for {artist} to \"\n f\"'{str(output_file.absolute())}'\"\n )\n return output_file" }, { "identifier": "download_cover_image", "path": "tidal_wave/utils.py", "snippet": "def download_cover_image(\n session: Session,\n cover_uuid: str,\n output_dir: Path,\n file_name: str = \"cover.jpg\",\n dimension: Union[int, Tuple[int]] = 1280,\n) -> Optional[Path]:\n \"\"\"Given a UUID that corresponds to a (JPEG) image on Tidal's servers,\n download the image file and write it as 'cover.jpeg' or 'cover.png'\n in the directory `path_to_output_dir`. Returns path to downloaded file\"\"\"\n cover_url_part: str = cover_uuid.replace(\"-\", \"/\")\n if isinstance(dimension, int):\n _url: str = IMAGE_URL % f\"{cover_url_part}/{dimension}x{dimension}\"\n elif isinstance(dimension, tuple):\n _url: str = IMAGE_URL % f\"{cover_url_part}/{dimension[0]}x{dimension[1]}\"\n\n with session.get(url=_url, headers={\"Accept\": \"image/jpeg\"}) as r:\n if not r.ok:\n logger.warning(\n \"Could not retrieve data from Tidal resources/images URL \"\n f\"due to error code: {r.status_code}\"\n )\n logger.debug(r.reason)\n return\n else:\n bytes_to_write = BytesIO(r.content)\n\n if bytes_to_write is not None:\n output_file: Path = output_dir / file_name\n bytes_to_write.seek(0)\n output_file.write_bytes(bytes_to_write.read())\n bytes_to_write.close()\n return output_file" }, { "identifier": "temporary_file", "path": "tidal_wave/utils.py", "snippet": "@contextmanager\ndef temporary_file(suffix: str = \".mka\"):\n \"\"\"This context-managed function is a stand-in for\n tempfile.NamedTemporaryFile as that stdlib object experiences\n errors on Windows.\"\"\"\n file_name: str = os.path.join(\n tempfile.gettempdir(), f\"{os.urandom(24).hex()}{suffix}\"\n )\n if not os.path.exists(file_name):\n open(file=file_name, mode=\"x\").close()\n\n tf = open(file=file_name, mode=\"wb\")\n try:\n yield tf\n finally:\n tf.close()\n os.unlink(tf.name)" } ]
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Iterable, List, Optional
from mutagen.mp4 import MP4Cover
from requests import Session
from .dash import manifester, JSONDASHManifest, Manifest, XMLDASHManifest
from .media import af_aq, AudioFormat, TAG_MAPPING
from .models import (
    AlbumsEndpointResponseJSON,
    ArtistsBioResponseJSON,
    TracksCreditsResponseJSON,
    TracksEndpointResponseJSON,
    TracksEndpointStreamResponseJSON,
    TracksLyricsResponseJSON,
)
from .requesting import (
    fetch_content_length,
    http_request_range_headers,
    request_albums,
    request_artist_bio,
    request_credits,
    request_lyrics,
    request_stream,
    request_tracks,
)
from .utils import download_artist_image, download_cover_image, temporary_file
import json
import logging
import re
import shlex
import shutil
import subprocess
import sys
import mutagen
import ffmpeg
6,260
self.stream: Optional[TracksEndpointStreamResponseJSON] = request_stream( session, self.track_id, aq ) def set_manifest(self): """This method sets self.manifest and self.codec""" self.manifest: Manifest = manifester(self.stream) # https://dashif.org/codecs/audio/ if self.manifest.codecs == "flac": self.codec = "flac" elif self.manifest.codecs == "mqa": self.codec = "flac" elif self.manifest.codecs == "mha1": # Sony 360 Reality Audio self.codec = "mka" elif self.manifest.codecs == "mp4a.40.5": # HE-AAC self.codec = "m4a" elif self.manifest.codecs == "mp4a.40.29": # HE-AAC v2 self.codec = "m4a" elif self.manifest.codecs == "mp4a.40.2": # AAC-LC self.codec = "m4a" elif self.manifest.codecs == "eac3": # Enhanced AC-3 self.codec = "m4a" elif self.manifest.codecs == "mp4a.40.34": # MP3 self.codec = "mp3" def set_album_dir(self, out_dir: Path): """This method sets self.album_dir, based on self.album and out_dir. In particular, self.album_dir is a subdirectory of out_dir based on the name of the album's artist""" artist_substring: str = self.album.artist.name.replace("..", "") album_substring: str = ( f"{self.album.name} " f"[{self.album.id}] [{self.album.release_date.year}]" ) self.album_dir: Path = out_dir / artist_substring / album_substring self.album_dir.mkdir(parents=True, exist_ok=True) if self.album.number_of_volumes > 1: volume_substring: str = f"Volume {self.metadata.volume_number}" (self.album_dir / volume_substring).mkdir(parents=True, exist_ok=True) def set_filename(self, audio_format: AudioFormat): """This method sets self.filename. It's based on self.metadata as well as audio_format. Additionally, if the available codecs in self.manifest don't match audio_format, warnings are logged""" _track_part: str = f"{self.metadata.track_number:02d} - {self.metadata.name}" if audio_format == AudioFormat.low: track_substring: str = f"{_track_part} [L]" elif audio_format == AudioFormat.high: track_substring: str = f"{_track_part} [H]" elif audio_format == AudioFormat.lossless: track_substring: str = f"{_track_part} [CD]" elif audio_format == AudioFormat.mqa: track_substring: str = f"{_track_part} [Q]" elif audio_format == AudioFormat.hi_res: track_substring: str = f"{_track_part} [HiRes]" elif audio_format == AudioFormat.dolby_atmos: track_substring: str = f"{_track_part} [A]" elif audio_format == AudioFormat.sony_360_reality_audio: track_substring: str = f"{_track_part} [360]" else: track_substring: str = _track_part # Check for MQA masquerading as HiRes here if audio_format == AudioFormat.hi_res: if self.manifest.codecs == "mqa": logger.warning( "Even though HiRes audio format was requested, this track is only " "available in MQA format. TIDAL regards this as 'HiRes' even though " "it is probably only lossless; i.e. 16-bit 44.1 kHz quality. " "Downloading of track will continue, but it will be marked as MQA." ) self.filename: Optional[str] = f"{_track_part} [Q].{self.codec}" elif (self.stream.bit_depth == 16) and (self.stream.sample_rate == 44100): logger.warning( "Even though HiRes audio format was requested, and TIDAL responded to " "that request without error, this track is only available in lossless " "format; i.e. 16-bit 44.1 kHz quality. Downloading of track will " "continue, but it will be marked as Lossless ([CD])." 
) self.filename: Optional[str] = f"{_track_part} [CD].{self.codec}" else: self.filename: Optional[str] = f"{track_substring}.{self.codec}" else: self.filename: Optional[str] = f"{track_substring}.{self.codec}" # for use in playlist file ordering self.trackname: str = re.match(r"(?:\d{2,3} - )(.+?$)", self.filename).groups()[ 0 ] def set_outfile(self): """Uses self.album_dir and self.metadata and self.filename to craft the pathlib.Path object, self.outfile, that is a reference to where the track will be written on disk.""" if self.album.number_of_volumes > 1: self.outfile: Path = ( self.album_dir / f"Volume {self.metadata.volume_number}" / self.filename ) self.absolute_outfile = str(self.outfile.absolute()) else: self.outfile: Path = self.album_dir / self.filename self.absolute_outfile = str(self.outfile.absolute()) if (self.outfile.exists()) and (self.outfile.stat().st_size > 0): logger.info( f"Track {self.absolute_outfile} already exists " "and therefore will not be overwritten" ) return else: return self.outfile def save_artist_image(self, session: Session): """This method writes a JPEG file with the name of each of self.metadata.artists to self.album_dir""" for a in self.metadata.artists: track_artist_image: Path = ( self.album_dir / f"{a.name.replace('..', '')}.jpg" ) if not track_artist_image.exists():
logger = logging.getLogger("__name__") @dataclass class Track: track_id: int def __post_init__(self): self._has_lyrics: Optional[bool] = None self.tags: dict = {} self.album_cover_saved: bool = False def get_metadata(self, session: Session): self.metadata: Optional[TracksEndpointResponseJSON] = request_tracks( session, self.track_id ) def get_album(self, session: Session): self.album: Optional[AlbumsEndpointResponseJSON] = request_albums( session, self.metadata.album.id ) def get_credits(self, session: Session): self.credits: Optional[TracksCreditsResponseJSON] = request_credits( session, self.track_id ) def get_lyrics(self, session: Session): if self._has_lyrics is None: self.lyrics: Optional[TracksLyricsResponseJSON] = request_lyrics( session, self.track_id ) if self.lyrics is None: self._has_lyrics = False else: self._has_lyrics = True else: return self.lyrics def get_stream(self, session: Session, audio_format: AudioFormat): """Populates self.stream, self.manifest""" aq: Optional[str] = af_aq.get(audio_format) self.stream: Optional[TracksEndpointStreamResponseJSON] = request_stream( session, self.track_id, aq ) def set_manifest(self): """This method sets self.manifest and self.codec""" self.manifest: Manifest = manifester(self.stream) # https://dashif.org/codecs/audio/ if self.manifest.codecs == "flac": self.codec = "flac" elif self.manifest.codecs == "mqa": self.codec = "flac" elif self.manifest.codecs == "mha1": # Sony 360 Reality Audio self.codec = "mka" elif self.manifest.codecs == "mp4a.40.5": # HE-AAC self.codec = "m4a" elif self.manifest.codecs == "mp4a.40.29": # HE-AAC v2 self.codec = "m4a" elif self.manifest.codecs == "mp4a.40.2": # AAC-LC self.codec = "m4a" elif self.manifest.codecs == "eac3": # Enhanced AC-3 self.codec = "m4a" elif self.manifest.codecs == "mp4a.40.34": # MP3 self.codec = "mp3" def set_album_dir(self, out_dir: Path): """This method sets self.album_dir, based on self.album and out_dir. In particular, self.album_dir is a subdirectory of out_dir based on the name of the album's artist""" artist_substring: str = self.album.artist.name.replace("..", "") album_substring: str = ( f"{self.album.name} " f"[{self.album.id}] [{self.album.release_date.year}]" ) self.album_dir: Path = out_dir / artist_substring / album_substring self.album_dir.mkdir(parents=True, exist_ok=True) if self.album.number_of_volumes > 1: volume_substring: str = f"Volume {self.metadata.volume_number}" (self.album_dir / volume_substring).mkdir(parents=True, exist_ok=True) def set_filename(self, audio_format: AudioFormat): """This method sets self.filename. It's based on self.metadata as well as audio_format. 
Additionally, if the available codecs in self.manifest don't match audio_format, warnings are logged""" _track_part: str = f"{self.metadata.track_number:02d} - {self.metadata.name}" if audio_format == AudioFormat.low: track_substring: str = f"{_track_part} [L]" elif audio_format == AudioFormat.high: track_substring: str = f"{_track_part} [H]" elif audio_format == AudioFormat.lossless: track_substring: str = f"{_track_part} [CD]" elif audio_format == AudioFormat.mqa: track_substring: str = f"{_track_part} [Q]" elif audio_format == AudioFormat.hi_res: track_substring: str = f"{_track_part} [HiRes]" elif audio_format == AudioFormat.dolby_atmos: track_substring: str = f"{_track_part} [A]" elif audio_format == AudioFormat.sony_360_reality_audio: track_substring: str = f"{_track_part} [360]" else: track_substring: str = _track_part # Check for MQA masquerading as HiRes here if audio_format == AudioFormat.hi_res: if self.manifest.codecs == "mqa": logger.warning( "Even though HiRes audio format was requested, this track is only " "available in MQA format. TIDAL regards this as 'HiRes' even though " "it is probably only lossless; i.e. 16-bit 44.1 kHz quality. " "Downloading of track will continue, but it will be marked as MQA." ) self.filename: Optional[str] = f"{_track_part} [Q].{self.codec}" elif (self.stream.bit_depth == 16) and (self.stream.sample_rate == 44100): logger.warning( "Even though HiRes audio format was requested, and TIDAL responded to " "that request without error, this track is only available in lossless " "format; i.e. 16-bit 44.1 kHz quality. Downloading of track will " "continue, but it will be marked as Lossless ([CD])." ) self.filename: Optional[str] = f"{_track_part} [CD].{self.codec}" else: self.filename: Optional[str] = f"{track_substring}.{self.codec}" else: self.filename: Optional[str] = f"{track_substring}.{self.codec}" # for use in playlist file ordering self.trackname: str = re.match(r"(?:\d{2,3} - )(.+?$)", self.filename).groups()[ 0 ] def set_outfile(self): """Uses self.album_dir and self.metadata and self.filename to craft the pathlib.Path object, self.outfile, that is a reference to where the track will be written on disk.""" if self.album.number_of_volumes > 1: self.outfile: Path = ( self.album_dir / f"Volume {self.metadata.volume_number}" / self.filename ) self.absolute_outfile = str(self.outfile.absolute()) else: self.outfile: Path = self.album_dir / self.filename self.absolute_outfile = str(self.outfile.absolute()) if (self.outfile.exists()) and (self.outfile.stat().st_size > 0): logger.info( f"Track {self.absolute_outfile} already exists " "and therefore will not be overwritten" ) return else: return self.outfile def save_artist_image(self, session: Session): """This method writes a JPEG file with the name of each of self.metadata.artists to self.album_dir""" for a in self.metadata.artists: track_artist_image: Path = ( self.album_dir / f"{a.name.replace('..', '')}.jpg" ) if not track_artist_image.exists():
download_artist_image(session, a, self.album_dir)
16
2023-12-12 21:50:25+00:00
8k
lbcb-sci/GNNome
inference.py
[ { "identifier": "AssemblyGraphDataset", "path": "graph_dataset.py", "snippet": "class AssemblyGraphDataset(DGLDataset):\n def __init__(self, root, assembler, threads=32, generate=False):\n self.root = os.path.abspath(root)\n self.assembler = assembler\n self.threads = threads\n self.assembly_dir = os.path.join(self.root, self.assembler)\n # print(self.assembly_dir)\n\n if 'raw' not in os.listdir(self.root):\n subprocess.run(f\"mkdir 'raw'\", shell=True, cwd=self.root)\n if 'output' not in os.listdir(self.assembly_dir):\n subprocess.run(f\"mkdir 'output'\", shell=True, cwd=self.assembly_dir)\n if f'processed' not in os.listdir(self.assembly_dir):\n subprocess.run(f\"mkdir 'processed'\", shell=True, cwd=self.assembly_dir)\n if f'info' not in os.listdir(self.assembly_dir):\n subprocess.run(f\"mkdir 'info'\", shell=True, cwd=self.assembly_dir)\n\n raw_dir = os.path.join(self.root, 'raw')\n save_dir = os.path.join(self.assembly_dir, f'processed')\n self.output_dir = os.path.join(self.assembly_dir, f'output')\n self.info_dir = os.path.join(self.assembly_dir, f'info')\n \n config = get_config()\n raven_dir = config['raven_dir']\n self.raven_path = os.path.join(raven_dir, f'build/bin/raven')\n self.raven_path = os.path.abspath(self.raven_path)\n hifiasm_dir = config['hifiasm_dir']\n self.hifiasm_path = os.path.join(hifiasm_dir, f'hifiasm')\n self.hifiasm_path = os.path.abspath(self.hifiasm_path)\n \n super().__init__(name='assembly_graphs', raw_dir=raw_dir, save_dir=save_dir)\n\n self.graph_list = []\n if not generate:\n for file in os.listdir(self.save_dir):\n idx = int(file[:-4])\n graph = dgl.load_graphs(os.path.join(self.save_dir, file))[0][0]\n graph = preprocess_graph(graph, self.root, idx)\n graph = add_positional_encoding(graph)\n print(f'DGL graph idx={idx} info:\\n',graph)\n self.graph_list.append((idx, graph))\n self.graph_list.sort(key=lambda x: x[0])\n\n def has_cache(self):\n \"\"\"Check if the raw data is already processed and stored.\"\"\"\n raw_files = {int(re.findall(r'(\\d+).fast*', raw)[0]) for raw in os.listdir(self.raw_dir)}\n prc_files = {int(re.findall(r'(\\d+).dgl', prc)[0]) for prc in os.listdir(self.save_dir)}\n return len(raw_files - prc_files) == 0 # set difference\n\n def __len__(self):\n return len(os.listdir(self.save_dir))\n\n def __getitem__(self, idx):\n i, graph = self.graph_list[idx]\n return i, graph\n\n def process(self):\n pass" }, { "identifier": "get_hyperparameters", "path": "hyperparameters.py", "snippet": "def get_hyperparameters():\n return {\n\n # Setup\n 'data_path': '/mnt/sod2-project/csb4/wgs/lovro/gnnome_assembly/hifi/train',\n 'temp_path': '/home/vrcekl/scratch/gnnome_assembly/train',\n 'eval_path': '/mnt/sod2-project/csb4/wgs/lovro/gnnome_assembly/hifi/evaluate',\n 'asms_path': '/home/vrcekl/scratch/gnnome_assembly/evaluate',\n 'refs_path': '/mnt/sod2-project/csb4/wgs/lovro/gnnome_assembly/references',\n 'checkpoints_path': '/mnt/sod2-project/csb4/wgs/lovro/gnnome_assembly/checkpoints',\n 'models_path': '/mnt/sod2-project/csb4/wgs/lovro/gnnome_assembly/models',\n \n 'data_path_ont': '/mnt/sod2-project/csb4/wgs/lovro/gnnome_assembly/ont/train',\n 'eval_path_ont': '/mnt/sod2-project/csb4/wgs/lovro/gnnome_assembly/ont/evaluate',\n 'asms_path_ont': '/home/vrcekl/scratch/gnnome_assembly/evaluate_ont',\n \n 'raven_path': '',\n 'hifiasm_path': '',\n 'pbsim3_dir': '',\n \n 'sample_profile_id': '',\n 'sample_file': '',\n \n 'assembler': 'hifiasm',\n 'dataset': 'chm13', # Not used at the moment\n 'initials': 'LV',\n\n 'device': 'cuda:0' if 
torch.cuda.is_available() else 'cpu',\n 'seed': 1,\n 'wandb_mode': 'disabled', # switch between 'online' and 'disabled'\n # 'wandb_project': 'GeNNome-hifiasm',\n 'wandb_project': 'hifiasm-runs',\n # 'wandb_project': 'Sep-23_ablations',\n\n 'chr_overfit': 0,\n 'plot_nga50_during_training': False,\n 'eval_frequency': 20, \n\n # Data\n 'use_similarities': True,\n # 'pos_to_neg_ratio': 16.5, # Not used, but could be a hyperparam for loss weight\n\n # Model\n 'dim_latent': 64,\n 'num_gnn_layers': 8,\n 'node_features': 2,\n 'edge_features': 2, # Put 2 if you use similarities, 1 otherwise\n 'hidden_edge_features': 16,\n 'hidden_edge_scores': 64,\n 'nb_pos_enc': 0,\n 'type_pos_enc': 'PR',\n 'batch_norm': True,\n # 'dropout': 0.08,\n\n # Training\n 'num_epochs': 200,\n 'lr': 1e-4,\n 'use_symmetry_loss': True,\n 'alpha': 0.1,\n 'num_parts_metis_train': 200,\n 'num_parts_metis_eval': 200,\n 'num_nodes_per_cluster': 10000, # 2000 = max 10GB GPU memory for d=128, L=8\n 'npc_lower_bound': 1, # 0.8\n 'npc_upper_bound': 1, # 1.2\n 'k_extra_hops': 1,\n 'patience': 2,\n 'decay': 0.95,\n 'masking': True,\n 'mask_frac_low': 80, # ~ 25x\n 'mask_frac_high': 100, # ~ 60x\n\n # Decoding\n 'strategy': 'greedy',\n 'num_decoding_paths': 100,\n 'decode_with_labels': False,\n 'load_checkpoint': True,\n 'num_threads': 32,\n 'B': 1,\n 'len_threshold': 10,\n }" } ]
import argparse
import os
import sys
import pickle
import random
import math
import collections
import time
import psutil
import torch
import torch.nn.functional as F
import dgl
import models
import evaluate
import utils
from tqdm import tqdm
from datetime import datetime
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
from multiprocessing import Manager
from graph_dataset import AssemblyGraphDataset
from hyperparameters import get_hyperparameters
4,905
if len_walk_it > 2: meanLogProb_it = sumLogProb_it / (len_walk_it - 2) # len(walk_f) - 1 + len(walk_b) - 1 <-> starting edge is neglected try: meanLogProb_scaled_it = meanLogProb_it / math.sqrt(len_contig_it) except ValueError: print(f'{indx:<3}: src={k[0]:<8} dst={k[1]:<8} len_walk={len_walk_it:<8} len_contig={len_contig_it:<12}') print(f'Value error: something is wrong here!') meanLogProb_scaled_it = 0 elif len_walk_it == 2: meanLogProb_it = 0.0 try: meanLogProb_scaled_it = meanLogProb_it / math.sqrt(len_contig_it) except ValueError: print(f'{indx:<3}: src={k[0]:<8} dst={k[1]:<8} len_walk={len_walk_it:<8} len_contig={len_contig_it:<12}') print(f'Value error: something is wrong here!') meanLogProb_scaled_it = 0 else: # len_walk_it == 1 <-> SELF-LOOP! len_contig_it = 0 sumLogProb_it = 0.0 meanLogProb_it = 0.0 meanLogprob_scaled_it = 0.0 print(f'SELF-LOOP!') print(f'{indx:<3}: src={k[0]:<8} dst={k[1]:<8} len_walk={len_walk_it:<8} len_contig={len_contig_it:<12} ' \ f'sumLogProb={sumLogProb_it:<12.3f} meanLogProb={meanLogProb_it:<12.4} meanLogProb_scaled={meanLogProb_scaled_it:<12.4}') indx += 1 all_walks.append(walk_it) all_visited_iter.append(visited_iter) all_contig_lens.append(len_contig_it) all_sumLogProbs.append(sumLogProb_it) all_meanLogProbs.append(meanLogProb_it) all_meanLogProbs_scaled.append(meanLogProb_scaled_it) best = max(all_contig_lens) idxx = all_contig_lens.index(best) elapsed = utils.timedelta_to_str(datetime.now() - time_start_get_candidates) print(f'Elapsed time (get_candidates): {elapsed}') best_walk = all_walks[idxx] best_visited = all_visited_iter[idxx] # Add all jumped-over nodes time_start_get_visited = datetime.now() trans = set() for ss, dd in zip(best_walk[:-1], best_walk[1:]): t1 = set(succs[ss]) & set(preds[dd]) t2 = {t^1 for t in t1} trans = trans | t1 | t2 best_visited = best_visited | trans best_contig_len = all_contig_lens[idxx] best_sumLogProb = all_sumLogProbs[idxx] best_meanLogProb = all_meanLogProbs[idxx] best_meanLogProb_scaled = all_meanLogProbs_scaled[idxx] elapsed = utils.timedelta_to_str(datetime.now() - time_start_get_visited) print(f'Elapsed time (get visited): {elapsed}') print(f'\nChosen walk with index: {idxx}') print(f'len_walk={len(best_walk):<8} len_contig={best_contig_len:<12} ' \ f'sumLogProb={best_sumLogProb:<12.3f} meanLogProb={best_meanLogProb:<12.4} meanLogProb_scaled={best_meanLogProb_scaled:<12.4}\n') if best_contig_len < 70000: break all_contigs.append(best_walk) visited |= best_visited all_walks_len.append(len(best_walk)) all_contigs_len.append(best_contig_len) print(f'All walks len: {all_walks_len}') print(f'All contigs len: {all_contigs_len}\n') if len(all_contigs) % 10 == 0: checkpoint = { 'walks': all_contigs, 'visited': visited, 'all_walks_len': all_walks_len, 'all_contigs_len': all_contigs_len } if not DEBUG: try: pickle.dump(checkpoint, open(f'{checkpoint_dir}/checkpoint_tmp.pkl', 'wb')) os.rename(f'{checkpoint_dir}/checkpoint_tmp.pkl', f'{checkpoint_dir}/checkpoint.pkl') except OSError: print(f'Checkpoint was not saved. 
Last available checkopint: {checkpoint_dir}/checkpoint.pkl') raise return all_contigs def inference(data_path, model_path, assembler, savedir, device='cpu', dropout=None): """Using a pretrained model, get walks and contigs on new data.""" hyperparameters = get_hyperparameters() seed = hyperparameters['seed'] num_gnn_layers = hyperparameters['num_gnn_layers'] hidden_features = hyperparameters['dim_latent'] nb_pos_enc = hyperparameters['nb_pos_enc'] batch_norm = hyperparameters['batch_norm'] node_features = hyperparameters['node_features'] edge_features = hyperparameters['edge_features'] hidden_edge_features = hyperparameters['hidden_edge_features'] hidden_edge_scores = hyperparameters['hidden_edge_scores'] strategy = hyperparameters['strategy'] B = hyperparameters['B'] nb_paths = hyperparameters['num_decoding_paths'] len_threshold = hyperparameters['len_threshold'] use_labels = hyperparameters['decode_with_labels'] load_checkpoint = hyperparameters['load_checkpoint'] threads = hyperparameters['num_threads'] # assembly_path = hyperparameters['asms_path'] device = 'cpu' # Hardcode, because we cannot do inference on a GPU - usually not enough memory to load the whole graph utils.set_seed(seed) time_start = datetime.now()
DEBUG = False def get_contig_length(walk, graph): total_length = 0 idx_src = walk[:-1] idx_dst = walk[1:] prefix = graph.edges[idx_src, idx_dst].data['prefix_length'] total_length = prefix.sum().item() total_length += graph.ndata['read_length'][walk[-1]] return total_length def get_subgraph(g, visited, device): """Remove the visited nodes from the graph.""" remove_node_idx = torch.LongTensor([item for item in visited]) list_node_idx = torch.arange(g.num_nodes()) keep_node_idx = torch.ones(g.num_nodes()) keep_node_idx[remove_node_idx] = 0 keep_node_idx = list_node_idx[keep_node_idx==1].int().to(device) sub_g = dgl.node_subgraph(g, keep_node_idx, store_ids=True) sub_g.ndata['idx_nodes'] = torch.arange(sub_g.num_nodes()).to(device) map_subg_to_g = sub_g.ndata[dgl.NID] return sub_g, map_subg_to_g def sample_edges(prob_edges, nb_paths): """Sample edges with Bernoulli sampling.""" if prob_edges.shape[0] > 2**24: prob_edges = prob_edges[:2**24] # torch.distributions.categorical.Categorical does not support tensors longer than 2**24 random_search = False if random_search: idx_edges = torch.randint(0, prob_edges.shape[0], (nb_paths,)) return idx_edges prob_edges = prob_edges.masked_fill(prob_edges<1e-9, 1e-9) prob_edges = prob_edges/ prob_edges.sum() prob_edges_nb_paths = prob_edges.repeat(nb_paths, 1) idx_edges = torch.distributions.categorical.Categorical(prob_edges_nb_paths).sample() return idx_edges def greedy_forwards(start, logProbs, neighbors, predecessors, edges, visited_old): """Greedy walk forwards.""" current = start walk = [] visited = set() sumLogProb = torch.tensor([0.0]) iteration = 0 while True: walk.append(current) visited.add(current) visited.add(current ^ 1) neighs_current = neighbors[current] if len(neighs_current) == 0: break if len(neighs_current) == 1: neighbor = neighs_current[0] if neighbor in visited_old or neighbor in visited: break else: sumLogProb += logProbs[edges[current, neighbor]] current = neighbor continue masked_neighbors = [n for n in neighs_current if not (n in visited_old or n in visited)] neighbor_edges = [edges[current, n] for n in masked_neighbors] if not neighbor_edges: break neighbor_p = logProbs[neighbor_edges] logProb, index = torch.topk(neighbor_p, k=1, dim=0) sumLogProb += logProb iteration += 1 current = masked_neighbors[index] return walk, visited, sumLogProb def greedy_backwards_rc(start, logProbs, predecessors, neighbors, edges, visited_old): """Greedy walk backwards.""" current = start ^ 1 walk = [] visited = set() sumLogProb = torch.tensor([0.0]) iteration = 0 while True: walk.append(current) visited.add(current) visited.add(current ^ 1) neighs_current = neighbors[current] if len(neighs_current) == 0: break if len(neighs_current) == 1: neighbor = neighs_current[0] if neighbor in visited_old or neighbor in visited: break else: sumLogProb += logProbs[edges[current, neighbor]] current = neighbor continue masked_neighbors = [n for n in neighs_current if not (n in visited_old or n in visited)] neighbor_edges = [edges[current, n] for n in masked_neighbors] if not neighbor_edges: break neighbor_p = logProbs[neighbor_edges] logProb, index = torch.topk(neighbor_p, k=1, dim=0) sumLogProb += logProb iteration += 1 current = masked_neighbors[index] walk = list(reversed([w ^ 1 for w in walk])) return walk, visited, sumLogProb def run_greedy_both_ways(src, dst, logProbs, succs, preds, edges, visited): walk_f, visited_f, sumLogProb_f = greedy_forwards(dst, logProbs, succs, preds, edges, visited) walk_b, visited_b, sumLogProb_b = greedy_backwards_rc(src, 
logProbs, preds, succs, edges, visited | visited_f) return walk_f, walk_b, visited_f, visited_b, sumLogProb_f, sumLogProb_b def get_contigs_greedy(g, succs, preds, edges, nb_paths=50, len_threshold=20, use_labels=False, checkpoint_dir=None, load_checkpoint=False, device='cpu', threads=32): """Iteratively search for contigs in a graph until the threshold is met.""" g = g.to('cpu') all_contigs = [] all_walks_len = [] all_contigs_len = [] visited = set() idx_contig = -1 B = 1 if use_labels: scores = g.edata['y'].to('cpu') scores = scores.masked_fill(scores<1e-9, 1e-9) logProbs = torch.log(scores) else: scores = g.edata['score'].to('cpu') logProbs = torch.log(torch.sigmoid(g.edata['score'].to('cpu'))) print(f'Starting to decode with greedy...') print(f'num_candidates: {nb_paths}, len_threshold: {len_threshold}\n') ckpt_file = os.path.join(checkpoint_dir, 'checkpoint.pkl') if load_checkpoint and os.path.isfile(ckpt_file): print(f'Loading checkpoint from: {checkpoint_dir}\n') checkpoint = pickle.load(open(f'{checkpoint_dir}/checkpoint.pkl', 'rb')) all_contigs = checkpoint['walks'] visited = checkpoint['visited'] idx_contig = len(all_contigs) - 1 all_walks_len = checkpoint['all_walks_len'] all_contigs_len = checkpoint['all_contigs_len'] while True: idx_contig += 1 time_start_sample_edges = datetime.now() sub_g, map_subg_to_g = get_subgraph(g, visited, 'cpu') if sub_g.num_edges() == 0: break if use_labels: # Debugging prob_edges = sub_g.edata['y'] else: prob_edges = torch.sigmoid(sub_g.edata['score']).squeeze() idx_edges = sample_edges(prob_edges, nb_paths) elapsed = utils.timedelta_to_str(datetime.now() - time_start_sample_edges) print(f'Elapsed time (sample edges): {elapsed}') all_walks = [] all_visited_iter = [] all_contig_lens = [] all_sumLogProbs = [] all_meanLogProbs = [] all_meanLogProbs_scaled = [] print(f'\nidx_contig: {idx_contig}, nb_processed_nodes: {len(visited)}, ' \ f'nb_remaining_nodes: {g.num_nodes() - len(visited)}, nb_original_nodes: {g.num_nodes()}') # Get nb_paths paths for a single iteration, then take the longest one time_start_get_candidates = datetime.now() with ThreadPoolExecutor(1) as executor: if DEBUG: print(f'Starting with greedy for one candidate', flush=True) all_cand_time = datetime.now() results = {} start_times = {} for e, idx in enumerate(idx_edges): src_init_edges = map_subg_to_g[sub_g.edges()[0][idx]].item() dst_init_edges = map_subg_to_g[sub_g.edges()[1][idx]].item() start_times[e] = datetime.now() if DEBUG: print(f'About to submit job - decoding from edge {e}: {src_init_edges, dst_init_edges}', flush=True) future = executor.submit(run_greedy_both_ways, src_init_edges, dst_init_edges, logProbs, succs, preds, edges, visited) results[(src_init_edges, dst_init_edges)] = (future, e) if DEBUG: process = psutil.Process(os.getpid()) children = process.children(recursive=True) print(f'Processes ran: {e+1}\n' \ f'Time needed: {utils.timedelta_to_str(datetime.now() - all_cand_time)}\n' \ f'Current process ID: {os.getpid()}\n' \ f'Total memory used (MB): {process.memory_info().rss / 1024 ** 2}', flush=True) if len(children) == 0: print(f'Process has no children!') for child in children: print(f'Child pid is {child.pid}', flush=True) print() indx = 0 for k, (f, e) in results.items(): # key, future -> Why did I not name this properly? 
walk_f, walk_b, visited_f, visited_b, sumLogProb_f, sumLogProb_b = f.result() if DEBUG: print(f'Finished with candidate {e}: {k}\t' \ f'Time needed: {utils.timedelta_to_str(datetime.now() - start_times[e])}') walk_it = walk_b + walk_f visited_iter = visited_f | visited_b sumLogProb_it = sumLogProb_f.item() + sumLogProb_b.item() len_walk_it = len(walk_it) len_contig_it = get_contig_length(walk_it, g).item() if k[0] == k[1]: len_walk_it = 1 if len_walk_it > 2: meanLogProb_it = sumLogProb_it / (len_walk_it - 2) # len(walk_f) - 1 + len(walk_b) - 1 <-> starting edge is neglected try: meanLogProb_scaled_it = meanLogProb_it / math.sqrt(len_contig_it) except ValueError: print(f'{indx:<3}: src={k[0]:<8} dst={k[1]:<8} len_walk={len_walk_it:<8} len_contig={len_contig_it:<12}') print(f'Value error: something is wrong here!') meanLogProb_scaled_it = 0 elif len_walk_it == 2: meanLogProb_it = 0.0 try: meanLogProb_scaled_it = meanLogProb_it / math.sqrt(len_contig_it) except ValueError: print(f'{indx:<3}: src={k[0]:<8} dst={k[1]:<8} len_walk={len_walk_it:<8} len_contig={len_contig_it:<12}') print(f'Value error: something is wrong here!') meanLogProb_scaled_it = 0 else: # len_walk_it == 1 <-> SELF-LOOP! len_contig_it = 0 sumLogProb_it = 0.0 meanLogProb_it = 0.0 meanLogprob_scaled_it = 0.0 print(f'SELF-LOOP!') print(f'{indx:<3}: src={k[0]:<8} dst={k[1]:<8} len_walk={len_walk_it:<8} len_contig={len_contig_it:<12} ' \ f'sumLogProb={sumLogProb_it:<12.3f} meanLogProb={meanLogProb_it:<12.4} meanLogProb_scaled={meanLogProb_scaled_it:<12.4}') indx += 1 all_walks.append(walk_it) all_visited_iter.append(visited_iter) all_contig_lens.append(len_contig_it) all_sumLogProbs.append(sumLogProb_it) all_meanLogProbs.append(meanLogProb_it) all_meanLogProbs_scaled.append(meanLogProb_scaled_it) best = max(all_contig_lens) idxx = all_contig_lens.index(best) elapsed = utils.timedelta_to_str(datetime.now() - time_start_get_candidates) print(f'Elapsed time (get_candidates): {elapsed}') best_walk = all_walks[idxx] best_visited = all_visited_iter[idxx] # Add all jumped-over nodes time_start_get_visited = datetime.now() trans = set() for ss, dd in zip(best_walk[:-1], best_walk[1:]): t1 = set(succs[ss]) & set(preds[dd]) t2 = {t^1 for t in t1} trans = trans | t1 | t2 best_visited = best_visited | trans best_contig_len = all_contig_lens[idxx] best_sumLogProb = all_sumLogProbs[idxx] best_meanLogProb = all_meanLogProbs[idxx] best_meanLogProb_scaled = all_meanLogProbs_scaled[idxx] elapsed = utils.timedelta_to_str(datetime.now() - time_start_get_visited) print(f'Elapsed time (get visited): {elapsed}') print(f'\nChosen walk with index: {idxx}') print(f'len_walk={len(best_walk):<8} len_contig={best_contig_len:<12} ' \ f'sumLogProb={best_sumLogProb:<12.3f} meanLogProb={best_meanLogProb:<12.4} meanLogProb_scaled={best_meanLogProb_scaled:<12.4}\n') if best_contig_len < 70000: break all_contigs.append(best_walk) visited |= best_visited all_walks_len.append(len(best_walk)) all_contigs_len.append(best_contig_len) print(f'All walks len: {all_walks_len}') print(f'All contigs len: {all_contigs_len}\n') if len(all_contigs) % 10 == 0: checkpoint = { 'walks': all_contigs, 'visited': visited, 'all_walks_len': all_walks_len, 'all_contigs_len': all_contigs_len } if not DEBUG: try: pickle.dump(checkpoint, open(f'{checkpoint_dir}/checkpoint_tmp.pkl', 'wb')) os.rename(f'{checkpoint_dir}/checkpoint_tmp.pkl', f'{checkpoint_dir}/checkpoint.pkl') except OSError: print(f'Checkpoint was not saved. 
Last available checkopint: {checkpoint_dir}/checkpoint.pkl') raise return all_contigs def inference(data_path, model_path, assembler, savedir, device='cpu', dropout=None): """Using a pretrained model, get walks and contigs on new data.""" hyperparameters = get_hyperparameters() seed = hyperparameters['seed'] num_gnn_layers = hyperparameters['num_gnn_layers'] hidden_features = hyperparameters['dim_latent'] nb_pos_enc = hyperparameters['nb_pos_enc'] batch_norm = hyperparameters['batch_norm'] node_features = hyperparameters['node_features'] edge_features = hyperparameters['edge_features'] hidden_edge_features = hyperparameters['hidden_edge_features'] hidden_edge_scores = hyperparameters['hidden_edge_scores'] strategy = hyperparameters['strategy'] B = hyperparameters['B'] nb_paths = hyperparameters['num_decoding_paths'] len_threshold = hyperparameters['len_threshold'] use_labels = hyperparameters['decode_with_labels'] load_checkpoint = hyperparameters['load_checkpoint'] threads = hyperparameters['num_threads'] # assembly_path = hyperparameters['asms_path'] device = 'cpu' # Hardcode, because we cannot do inference on a GPU - usually not enough memory to load the whole graph utils.set_seed(seed) time_start = datetime.now()
ds = AssemblyGraphDataset(data_path, assembler)
0
2023-12-08 04:45:45+00:00
8k
SusheelThapa/C-DOTS
app.py
[ { "identifier": "CodeDocumenter", "path": "features/documenter.py", "snippet": "class CodeDocumenter(QWidget):\n def __init__(self):\n super().__init__()\n self.init_ui()\n\n self.typing_timer = QTimer(self)\n self.typing_timer.timeout.connect(self.type_next_character)\n self.current_typing_position = 0\n\n def init_ui(self):\n self.setWindowTitle(\"Article Generator\")\n self.setGeometry(100, 100, 1500, 900)\n self.setStyleSheet(\"background-color: #FFFFFF; color: #000000;\")\n\n main_layout = QHBoxLayout()\n main_layout.setContentsMargins(20, 20, 20, 20)\n\n left_layout = QVBoxLayout()\n left_layout.setContentsMargins(10, 20, 10, 20)\n left_layout.setSpacing(20)\n\n right_layout = QVBoxLayout()\n right_layout.setContentsMargins(10, 10, 10, 10)\n\n splitter = QSplitter()\n\n language_label = QLabel(\"Select Programming Language:\")\n language_label.setFont(QFont(\"Arial\", 16))\n\n self.language_selection = QComboBox()\n self.language_selection.setFont(QFont(\"Arial\", 16))\n self.language_selection.setStyleSheet(\n \"QComboBox { padding: 8px; background-color: #E0E0E0 ; color: #000000; }\"\n )\n self.language_selection.addItems([\"Python\", \"Java\", \"C++\", \"JavaScript\", \"C\"])\n\n code_label = QLabel(\"Code to add Documentation\")\n code_label.setFont(QFont(\"Arial\", 16))\n\n self.code_entry = QTextEdit()\n self.code_entry.setFont(QFont(\"Arial\", 16))\n self.code_entry.setStyleSheet(\n \"QTextEdit { border-radius: 5px; padding: 5px; background-color: #E0E0E0 ; color: #000000; }\"\n )\n\n generate_doc_button = QPushButton(\"Generate Documentation\")\n generate_doc_button.setFont(QFont(\"Arial\", 18))\n generate_doc_button.setStyleSheet(\n \"QPushButton { border-radius: 10px; padding: 10px; background-color: #4CAF50 ; color: #FFFFFF; font-weight:600;} QPushButton:hover { background-color: #45A049; }\"\n )\n generate_doc_button.clicked.connect(self.generate_documentation)\n\n left_layout.addWidget(language_label)\n left_layout.addWidget(self.language_selection)\n left_layout.addWidget(code_label)\n left_layout.addWidget(self.code_entry)\n left_layout.addWidget(generate_doc_button)\n\n self.generated_text_area = QTextBrowser()\n self.generated_text_area.setReadOnly(True)\n self.generated_text_area.setFont(QFont(\"Arial\", 16))\n self.generated_text_area.setStyleSheet(\n \"QTextBrowser { border-radius: 5px; padding: 5px; background-color: #E0E0E0 ; color: #000000; }\"\n )\n\n right_layout.addWidget(self.generated_text_area)\n\n # Assembling the main layout\n left_widget = QWidget()\n left_widget.setLayout(left_layout)\n right_widget = QWidget()\n right_widget.setLayout(right_layout)\n\n splitter.addWidget(left_widget)\n splitter.addWidget(right_widget)\n splitter.setSizes([400, 800])\n\n main_layout.addWidget(splitter)\n self.setLayout(main_layout)\n\n def generate_documentation(self):\n language = self.language_selection.currentText()\n code = self.code_entry.toPlainText()\n\n self.generated_text_area.setText(\"Documentng the Code Snippets...\")\n\n self.worker = Worker(language, code)\n self.thread = threading.Thread(target=self.worker.run)\n self.worker.finished.connect(self.on_finished)\n self.thread.start()\n\n def on_finished(self, processed_text):\n self.processed_text = processed_text\n self.current_typing_position = 0\n self.typing_timer.start(20)\n\n def type_next_character(self):\n if self.current_typing_position < len(self.processed_text):\n if self.current_typing_position == 0:\n self.generated_text_area.clear()\n\n current_text = 
self.processed_text[self.current_typing_position]\n self.generated_text_area.moveCursor(QTextCursor.End)\n self.generated_text_area.insertPlainText(current_text)\n self.current_typing_position += 1\n else:\n self.typing_timer.stop()" }, { "identifier": "CodeOptimizer", "path": "features/optimizer.py", "snippet": "class CodeOptimizer(QWidget):\n def __init__(self):\n super().__init__()\n self.init_ui()\n\n self.typing_timer = QTimer(self)\n self.typing_timer.timeout.connect(self.type_next_character)\n self.current_typing_position = 0\n\n def init_ui(self):\n self.setWindowTitle(\"Article Generator\")\n self.setGeometry(100, 100, 1500, 900)\n self.setStyleSheet(\"background-color: #FFFFFF; color: #000000;\")\n\n main_layout = QHBoxLayout()\n main_layout.setContentsMargins(20, 20, 20, 20)\n\n left_layout = QVBoxLayout()\n left_layout.setContentsMargins(10, 20, 10, 20)\n left_layout.setSpacing(20)\n\n right_layout = QVBoxLayout()\n right_layout.setContentsMargins(10, 10, 10, 10)\n\n splitter = QSplitter()\n\n language_label = QLabel(\"Select Programming Language:\")\n language_label.setFont(QFont(\"Arial\", 16))\n\n self.language_selection = QComboBox()\n self.language_selection.setFont(QFont(\"Arial\", 16))\n self.language_selection.setStyleSheet(\n \"QComboBox { padding: 8px; background-color: #dae6db; color: #000000; }\"\n )\n self.language_selection.addItems([\"Python\", \"Java\", \"C++\", \"JavaScript\", \"C\"])\n\n code_label = QLabel(\"Code to Optimize\")\n code_label.setFont(QFont(\"Arial\", 16))\n\n self.code_entry = QTextEdit()\n self.code_entry.setFont(QFont(\"Arial\", 16))\n self.code_entry.setStyleSheet(\n \"QTextEdit { border-radius: 5px; padding: 5px; background-color: #dae6db; color: #000000; }\"\n )\n\n generate_doc_button = QPushButton(\"Optimize Code\")\n generate_doc_button.setFont(QFont(\"Arial\", 18))\n generate_doc_button.setStyleSheet(\n \"QPushButton { border-radius: 10px; padding: 10px; background-color: #1565C0; color: #FFFFFF; font-weight:600; } QPushButton:hover { background-color: #0F4FA8; }\"\n )\n generate_doc_button.clicked.connect(self.optimize_code)\n\n left_layout.addWidget(language_label)\n left_layout.addWidget(self.language_selection)\n left_layout.addWidget(code_label)\n left_layout.addWidget(self.code_entry)\n left_layout.addWidget(generate_doc_button)\n\n self.generated_text_area = QTextBrowser()\n self.generated_text_area.setReadOnly(True)\n self.generated_text_area.setFont(QFont(\"Arial\", 16))\n self.generated_text_area.setStyleSheet(\n \"QTextBrowser { border-radius: 5px; padding: 5px; background-color: #dae6db; color: #000000; }\"\n )\n\n right_layout.addWidget(self.generated_text_area)\n\n left_widget = QWidget()\n left_widget.setLayout(left_layout)\n right_widget = QWidget()\n right_widget.setLayout(right_layout)\n\n splitter.addWidget(left_widget)\n splitter.addWidget(right_widget)\n splitter.setSizes([400, 800])\n\n main_layout.addWidget(splitter)\n self.setLayout(main_layout)\n\n def optimize_code(self):\n language = self.language_selection.currentText()\n code = self.code_entry.toPlainText()\n\n self.generated_text_area.setText(\"Optimizing Code Snippets...\")\n\n self.worker = Worker(language, code)\n self.thread = threading.Thread(target=self.worker.run)\n self.worker.finished.connect(self.on_finished)\n self.thread.start()\n\n def on_finished(self, processed_text):\n self.processed_text = processed_text\n self.current_typing_position = 0\n self.typing_timer.start(20)\n\n def type_next_character(self):\n if self.current_typing_position < 
len(self.processed_text):\n if self.current_typing_position == 0:\n self.generated_text_area.clear()\n\n current_text = self.processed_text[self.current_typing_position]\n self.generated_text_area.moveCursor(QTextCursor.End)\n self.generated_text_area.insertPlainText(current_text)\n self.current_typing_position += 1\n else:\n self.typing_timer.stop() " }, { "identifier": "CodeSummarizer", "path": "features/summarizer.py", "snippet": "class CodeSummarizer(QWidget):\n def __init__(self):\n super().__init__()\n self.init_ui()\n\n self.typing_timer = QTimer(self)\n self.typing_timer.timeout.connect(self.type_next_character)\n self.current_typing_position = 0\n\n def init_ui(self):\n self.setWindowTitle(\"Code Summarizer\")\n self.setGeometry(100, 100, 1500, 900)\n self.setStyleSheet(\"background-color: #FFFFFF; color: #000000;\")\n\n main_layout = QHBoxLayout()\n main_layout.setContentsMargins(20, 20, 20, 20)\n\n left_layout = QVBoxLayout()\n left_layout.setContentsMargins(10, 20, 10, 20)\n left_layout.setSpacing(20)\n\n right_layout = QVBoxLayout()\n right_layout.setContentsMargins(10, 10, 10, 10)\n\n splitter = QSplitter()\n\n language_label = QLabel(\"Select Programming Language:\")\n language_label.setFont(QFont(\"Arial\", 16))\n\n self.language_selection = QComboBox()\n self.language_selection.setFont(QFont(\"Arial\", 16))\n self.language_selection.setStyleSheet(\n \"QComboBox { padding: 8px; background-color: #F2EFEF; color: #202020; }\"\n )\n self.language_selection.addItems([\"Python\", \"Java\", \"C++\", \"JavaScript\", \"C\"])\n\n code_label = QLabel(\"Code to Summarize\")\n code_label.setFont(QFont(\"Arial\", 16))\n\n self.code_entry = QTextEdit()\n self.code_entry.setFont(QFont(\"Arial\", 16))\n self.code_entry.setStyleSheet(\n \"QTextEdit { border-radius: 5px; padding: 5px; background-color: #F2EFEF; color: #202020; }\"\n )\n\n generate_summarize_button = QPushButton(\"Summarize Code\")\n generate_summarize_button.setFont(QFont(\"Arial\", 18))\n generate_summarize_button.setStyleSheet(\n \"QPushButton { border-radius: 10px; padding: 10px; background-color: #601527; color: #FFFFFF; font-weight:600; } QPushButton:hover { background-color: #601527; }\"\n )\n generate_summarize_button.clicked.connect(self.summarize_code)\n\n left_layout.addWidget(language_label)\n left_layout.addWidget(self.language_selection)\n left_layout.addWidget(code_label)\n left_layout.addWidget(self.code_entry)\n left_layout.addWidget(generate_summarize_button)\n\n self.generated_text_area = QTextBrowser()\n self.generated_text_area.setReadOnly(True)\n self.generated_text_area.setFont(QFont(\"Arial\", 16))\n self.generated_text_area.setStyleSheet(\n \"QTextBrowser { border-radius: 5px; padding: 5px; background-color: #F2EFEF; color: #202020; }\"\n )\n\n right_layout.addWidget(self.generated_text_area)\n\n left_widget = QWidget()\n left_widget.setLayout(left_layout)\n right_widget = QWidget()\n right_widget.setLayout(right_layout)\n\n splitter.addWidget(left_widget)\n splitter.addWidget(right_widget)\n splitter.setSizes([400, 800])\n\n main_layout.addWidget(splitter)\n self.setLayout(main_layout)\n\n def summarize_code(self):\n language = self.language_selection.currentText()\n code = self.code_entry.toPlainText()\n\n self.generated_text_area.setText(\"Summarizing the Code Snippets...\")\n\n self.worker = Worker(language, code)\n self.thread = threading.Thread(target=self.worker.run)\n self.worker.finished.connect(self.on_finished)\n self.thread.start()\n\n def on_finished(self, processed_text):\n 
self.processed_text = processed_text\n self.current_typing_position = 0\n self.typing_timer.start(20)\n\n def type_next_character(self):\n if self.current_typing_position < len(self.processed_text):\n if self.current_typing_position == 0:\n self.generated_text_area.clear()\n\n current_text = self.processed_text[self.current_typing_position]\n self.generated_text_area.moveCursor(QTextCursor.End)\n self.generated_text_area.insertPlainText(current_text)\n self.current_typing_position += 1\n else:\n self.typing_timer.stop()" }, { "identifier": "CodeTranslator", "path": "features/translator.py", "snippet": "class CodeTranslator(QWidget):\n def __init__(self):\n super().__init__()\n self.init_ui()\n\n self.typing_timer = QTimer(self)\n self.typing_timer.timeout.connect(self.type_next_character)\n self.current_typing_position = 0\n self.processed_text = \"\" \n\n def init_ui(self):\n self.setWindowTitle(\"Code Translator\")\n self.setGeometry(100, 100, 1500, 900)\n self.setStyleSheet(\"background-color: #FFFFFF; color: #000000;\")\n\n main_layout = QHBoxLayout()\n self.setup_main_layout(main_layout)\n self.setLayout(main_layout)\n\n def setup_main_layout(self, main_layout):\n main_layout.setContentsMargins(20, 20, 20, 20)\n left_layout = self.setup_left_layout()\n right_layout = self.setup_right_layout()\n\n splitter = QSplitter()\n left_widget = QWidget()\n left_widget.setLayout(left_layout)\n right_widget = QWidget()\n right_widget.setLayout(right_layout)\n splitter.addWidget(left_widget)\n splitter.addWidget(right_widget)\n splitter.setSizes([400, 800])\n\n main_layout.addWidget(splitter)\n\n def setup_left_layout(self):\n left_layout = QVBoxLayout()\n left_layout.setContentsMargins(10, 20, 10, 20)\n left_layout.setSpacing(20)\n\n source_lang_label = self.create_label(\"Select Source Programming Language:\")\n self.source_lang_selection = self.create_combobox(\n [\"Python\", \"Java\", \"C++\", \"JavaScript\", \"C\"]\n )\n target_lang_label = self.create_label(\"Select Target Programming Language:\")\n self.target_lang_selection = self.create_combobox(\n [\"Python\", \"Java\", \"C++\", \"JavaScript\", \"C\"]\n )\n code_label = self.create_label(\"Code Snippets:\")\n self.code_entry = self.create_text_edit()\n translate_button = self.create_button(\n \"Translate Code\", self.generate_translate_code\n )\n\n left_layout.addWidget(source_lang_label)\n left_layout.addWidget(self.source_lang_selection)\n left_layout.addWidget(target_lang_label)\n left_layout.addWidget(self.target_lang_selection)\n left_layout.addWidget(code_label)\n left_layout.addWidget(self.code_entry)\n left_layout.addWidget(translate_button)\n\n return left_layout\n\n def setup_right_layout(self):\n right_layout = QVBoxLayout()\n right_layout.setContentsMargins(10, 10, 10, 10)\n self.generated_text_area = self.create_text_browser()\n right_layout.addWidget(self.generated_text_area)\n\n return right_layout\n\n def create_label(self, text):\n label = QLabel(text)\n label.setFont(QFont(\"Arial\", 16))\n return label\n\n def create_combobox(self, items):\n combobox = QComboBox()\n combobox.setFont(QFont(\"Arial\", 16))\n combobox.addItems(items)\n combobox.setStyleSheet(\n \"padding: 5px; background-color: #F0E6F4; color: #303030;\"\n )\n return combobox\n\n def create_text_edit(self):\n text_edit = QTextEdit()\n text_edit.setFont(QFont(\"Arial\", 16))\n text_edit.setStyleSheet(\n \"border-radius: 5px; padding: 5px; background-color: #F0E6F4; color: #303030;\"\n )\n return text_edit\n\n def create_button(self, text, callback):\n 
button = QPushButton(text)\n button.setFont(QFont(\"Arial\", 18))\n button.clicked.connect(callback)\n button.setStyleSheet(\n \"\"\"\n QPushButton {\n border-radius: 10px;\n padding: 10px;\n background-color: #6A1B9A;\n color: #FFFFFF;\n font-weight:600;\n }\n QPushButton:hover {\n background-color: #6A1B9A;\n }\n \"\"\"\n )\n return button\n\n def create_text_browser(self):\n text_browser = QTextBrowser()\n text_browser.setReadOnly(True)\n text_browser.setFont(QFont(\"Arial\", 16))\n text_browser.setStyleSheet(\n \"border-radius: 5px; padding: 5px; background-color: #F0E6F4; color: #303030;\"\n )\n return text_browser\n\n def generate_translate_code(self):\n source_lang = self.source_lang_selection.currentText()\n target_lang = self.target_lang_selection.currentText()\n code = self.code_entry.toPlainText()\n\n self.generated_text_area.setText(\n f\"Translating Code Snippet from {source_lang} to {target_lang}...\"\n )\n\n self.worker = Worker(source_lang, target_lang, code)\n self.thread = threading.Thread(target=self.worker.run)\n self.worker.finished.connect(self.on_finished)\n self.thread.start()\n\n def on_finished(self, processed_text):\n self.processed_text = processed_text\n self.current_typing_position = 0\n self.typing_timer.start(20)\n\n def type_next_character(self):\n if self.current_typing_position < len(self.processed_text):\n if self.current_typing_position == 0:\n self.generated_text_area.clear()\n\n current_text = self.processed_text[self.current_typing_position]\n self.generated_text_area.moveCursor(QTextCursor.End)\n self.generated_text_area.insertPlainText(current_text)\n self.current_typing_position += 1\n else:\n self.typing_timer.stop() " } ]
import sys
from PyQt5.QtWidgets import QApplication, QMainWindow, QTabWidget, QWidget, QTabBar
from features.documenter import CodeDocumenter
from features.optimizer import CodeOptimizer
from features.summarizer import CodeSummarizer
from features.translator import CodeTranslator
4,645
class StretchedTabBar(QTabBar):
    def __init__(self, parent=None):
        super().__init__(parent)

    def tabSizeHint(self, index):
        size = super().tabSizeHint(index)
        if self.count() > 0:
            size.setWidth(self.parent().width() // self.count())
        return size


class CDOTSApp(QMainWindow):
    def __init__(self):
        super().__init__()
        self.init_ui()

    def init_ui(self):
        self.setWindowTitle("C-DOTS: Your Coding Assistance")
        self.setGeometry(100, 100, 1600, 900)

        # Create the tab widget with a stretched tab bar
        tab_widget = QTabWidget()
        tab_widget.setTabBar(StretchedTabBar(tab_widget))
        tab_widget.setStyleSheet(
            """
            QTabBar::tab {
                background-color: #333333;
                color: #CCCCCC;
                padding: 15px;
                font-size:20px;
                font-weight:500;
            }
            QTabBar::tab:selected {
                background: #007BFF;
                color: #FFFFFF;
            }
            QTabBar::tab:hover {
                background: #555555;
                color:#FFFFFF;
            }
            """
        )

        # Add tabs
class StretchedTabBar(QTabBar):
    def __init__(self, parent=None):
        super().__init__(parent)

    def tabSizeHint(self, index):
        size = super().tabSizeHint(index)
        if self.count() > 0:
            size.setWidth(self.parent().width() // self.count())
        return size


class CDOTSApp(QMainWindow):
    def __init__(self):
        super().__init__()
        self.init_ui()

    def init_ui(self):
        self.setWindowTitle("C-DOTS: Your Coding Assistance")
        self.setGeometry(100, 100, 1600, 900)

        # Create the tab widget with a stretched tab bar
        tab_widget = QTabWidget()
        tab_widget.setTabBar(StretchedTabBar(tab_widget))
        tab_widget.setStyleSheet(
            """
            QTabBar::tab {
                background-color: #333333;
                color: #CCCCCC;
                padding: 15px;
                font-size:20px;
                font-weight:500;
            }
            QTabBar::tab:selected {
                background: #007BFF;
                color: #FFFFFF;
            }
            QTabBar::tab:hover {
                background: #555555;
                color:#FFFFFF;
            }
            """
        )

        # Add tabs
tab_widget.addTab(CodeDocumenter(), "Documenter")
0
2023-12-07 10:48:28+00:00
8k
amadad/agentcy3
agency_swarm/agency/agency.py
[ { "identifier": "Agent", "path": "agency_swarm/agents/agent.py", "snippet": "class Agent():\n @property\n def assistant(self):\n if self._assistant is None:\n raise Exception(\"Assistant is not initialized. Please run init_oai() first.\")\n return self._assistant\n\n @assistant.setter\n def assistant(self, value):\n self._assistant = value\n\n @property\n def functions(self):\n return [tool for tool in self.tools if issubclass(tool, BaseTool)]\n\n def __init__(self, id: str = None, name: str = None, description: str = None, instructions: str = \"\",\n tools: List[Union[Type[BaseTool], Type[Retrieval], Type[CodeInterpreter]]] = None,\n files_folder: Union[List[str], str] = None,\n file_ids: List[str] = None, metadata: Dict[str, str] = None, model: str = \"gpt-4-1106-preview\"):\n \"\"\"\n Initializes an Agent with specified attributes, tools, and OpenAI client.\n\n Parameters:\n id (str, optional): Unique identifier for the agent. Defaults to None.\n name (str, optional): Name of the agent. Defaults to the class name if not provided.\n description (str, optional): A brief description of the agent's purpose. Defaults to None.\n instructions (str, optional): Path to a file containing specific instructions for the agent. Defaults to an empty string.\n tools (List[Union[Type[BaseTool], Type[Retrieval], Type[CodeInterpreter]]], optional): A list of tools (as classes) that the agent can use. Defaults to an empty list.\n files_folder (Union[List[str], str], optional): Path or list of paths to directories containing files associated with the agent. Defaults to None.\n file_ids (List[str], optional): List of file IDs for files associated with the agent. Defaults to an empty list.\n metadata (Dict[str, str], optional): Metadata associated with the agent. Defaults to an empty dictionary.\n model (str, optional): The model identifier for the OpenAI API. Defaults to \"gpt-4-1106-preview\".\n\n This constructor sets up the agent with its unique properties, initializes the OpenAI client, reads instructions if provided, and uploads any associated files.\n \"\"\"\n self.id = id\n self.name = name if name else self.__class__.__name__\n self.description = description\n self.instructions = instructions\n self.tools = tools if tools else []\n self.files_folder = files_folder\n self.file_ids = file_ids if file_ids else []\n self.metadata = metadata if metadata else {}\n self.model = model\n\n self._assistant: Any = None\n self._shared_instructions = None\n\n self.client = get_openai_client()\n\n if os.path.isfile(self.instructions):\n self._read_instructions(self.instructions)\n elif os.path.isfile(os.path.join(self.get_class_folder_path(), self.instructions)):\n self._read_instructions(os.path.join(self.get_class_folder_path(), self.instructions))\n\n self._upload_files()\n\n def init_oai(self):\n \"\"\"\n Initializes the OpenAI assistant for the agent.\n\n This method handles the initialization and potential updates of the agent's OpenAI assistant. It loads the assistant based on a saved ID, updates the assistant if necessary, or creates a new assistant if it doesn't exist. 
After initialization or update, it saves the assistant's settings.\n\n Output:\n self: Returns the agent instance for chaining methods or further processing.\n \"\"\"\n\n # check if settings.json exists\n path = self.get_settings_path()\n\n # load assistant from id\n if self.id:\n self.assistant = self.client.beta.assistants.retrieve(self.id)\n # update assistant if parameters are different\n if not self._check_parameters(self.assistant.model_dump()):\n self._update_assistant()\n return self\n\n # load assistant from settings\n if os.path.exists(path):\n with open(path, 'r') as f:\n settings = json.load(f)\n # iterate settings and find the assistant with the same name\n for assistant_settings in settings:\n if assistant_settings['name'] == self.name:\n self.assistant = self.client.beta.assistants.retrieve(assistant_settings['id'])\n self.id = assistant_settings['id']\n # update assistant if parameters are different\n if not self._check_parameters(self.assistant.model_dump()):\n print(\"Updating assistant... \" + self.name)\n self._update_assistant()\n self._update_settings()\n return self\n # create assistant if settings.json does not exist or assistant with the same name does not exist\n self.assistant = self.client.beta.assistants.create(\n name=self.name,\n description=self.description,\n instructions=self.instructions,\n tools=self.get_oai_tools(),\n file_ids=self.file_ids,\n metadata=self.metadata,\n model=self.model\n )\n\n self.id = self.assistant.id\n\n self._save_settings()\n\n return self\n\n def _update_assistant(self):\n \"\"\"\n Updates the existing assistant's parameters on the OpenAI server.\n\n This method updates the assistant's details such as name, description, instructions, tools, file IDs, metadata, and the model. It only updates parameters that have non-empty values. After updating the assistant, it also updates the local settings file to reflect these changes.\n\n No input parameters are directly passed to this method as it uses the agent's instance attributes.\n\n No output parameters are returned, but the method updates the assistant's details on the OpenAI server and locally updates the settings file.\n \"\"\"\n\n params = {\n \"name\": self.name,\n \"description\": self.description,\n \"instructions\": self.instructions,\n \"tools\": self.get_oai_tools(),\n \"file_ids\": self.file_ids,\n \"metadata\": self.metadata,\n \"model\": self.model\n }\n params = {k: v for k, v in params.items() if v}\n self.assistant = self.client.beta.assistants.update(\n self.id,\n **params,\n )\n self._update_settings()\n\n def _check_parameters(self, assistant_settings):\n \"\"\"\n Checks if the agent's parameters match with the given assistant settings.\n\n Parameters:\n assistant_settings (dict): A dictionary containing the settings of an assistant.\n\n Returns:\n bool: True if all the agent's parameters match the assistant settings, False otherwise.\n\n This method compares the current agent's parameters such as name, description, instructions, tools, file IDs, metadata, and model with the given assistant settings. It uses DeepDiff to compare complex structures like tools and metadata. 
If any parameter does not match, it returns False; otherwise, it returns True.\n \"\"\"\n\n if self.name != assistant_settings['name']:\n return False\n if self.description != assistant_settings['description']:\n return False\n if self.instructions != assistant_settings['instructions']:\n return False\n tools_diff = DeepDiff(self.get_oai_tools(), assistant_settings['tools'], ignore_order=True)\n if tools_diff != {}:\n return False\n if set(self.file_ids) != set(assistant_settings['file_ids']):\n return False\n metadata_diff = DeepDiff(self.metadata, assistant_settings['metadata'], ignore_order=True)\n if metadata_diff != {}:\n return False\n if self.model != assistant_settings['model']:\n return False\n return True\n\n def _save_settings(self):\n path = self.get_settings_path()\n # check if settings.json exists\n if not os.path.isfile(path):\n with open(path, 'w') as f:\n json.dump([self.assistant.model_dump()], f, indent=4)\n else:\n settings = []\n with open(path, 'r') as f:\n settings = json.load(f)\n settings.append(self.assistant.model_dump())\n with open(path, 'w') as f:\n json.dump(settings, f, indent=4)\n\n def _update_settings(self):\n path = self.get_settings_path()\n # check if settings.json exists\n if os.path.isfile(path):\n settings = []\n with open(path, 'r') as f:\n settings = json.load(f)\n for i, assistant_settings in enumerate(settings):\n if assistant_settings['id'] == self.id:\n settings[i] = self.assistant.model_dump()\n break\n with open(path, 'w') as f:\n json.dump(settings, f, indent=4)\n\n def _read_instructions(self, path):\n with open(path, 'r') as f:\n self.instructions = f.read()\n\n def _upload_files(self):\n if isinstance(self.files_folder, str):\n f_path = self.files_folder\n\n if not os.path.isdir(f_path):\n f_path = os.path.join(self.get_class_folder_path(), self.files_folder)\n\n if os.path.isdir(f_path):\n f_paths = os.listdir(f_path)\n\n f_paths = [f for f in f_paths if not f.startswith(\".\")]\n\n f_paths = [os.path.join(f_path, f) for f in f_paths]\n\n for f_path in f_paths:\n file_id = self._get_id_from_file(f_path)\n if file_id:\n print(\"File already uploaded. Skipping... \" + os.path.basename(f_path))\n self.file_ids.append(file_id)\n else:\n print(\"Uploading new file... \" + os.path.basename(f_path))\n with open(f_path, 'rb') as f:\n file_id = self.client.files.create(file=f, purpose=\"assistants\").id\n self.file_ids.append(file_id)\n self._add_id_to_file(f_path, file_id)\n\n if Retrieval not in self.tools:\n print(\"Detected files without Retrieval. 
Adding Retrieval tool...\")\n self.add_tool(Retrieval)\n else:\n raise Exception(\"Files folder path is not a directory.\")\n\n def _add_id_to_file(self, f_path, id):\n \"\"\"Add file id to file name\"\"\"\n if os.path.isfile(f_path):\n file_name, file_ext = os.path.splitext(f_path)\n f_path_new = file_name + \"_\" + id + file_ext\n os.rename(f_path, f_path_new)\n return f_path_new\n else:\n raise Exception(\"Items in files folder must be files.\")\n\n def _get_id_from_file(self, f_path):\n \"\"\"Get file id from file name\"\"\"\n if os.path.isfile(f_path):\n file_name, file_ext = os.path.splitext(f_path)\n file_name = os.path.basename(file_name)\n file_name = file_name.split(\"_\")\n if len(file_name) > 1:\n return file_name[-1] if \"file-\" in file_name[-1] else None\n else:\n return None\n else:\n raise Exception(\"Items in files folder must be files.\")\n\n def get_settings_path(self):\n return os.path.join(\"./\", 'settings.json')\n\n def get_class_folder_path(self):\n return os.path.abspath(os.path.dirname(inspect.getfile(self.__class__)))\n\n def set_params(self, **params):\n for k, v in params.items():\n setattr(self, k, v)\n\n def add_tool(self, tool):\n if not isinstance(tool, type):\n raise Exception(\"Tool must not be initialized.\")\n if issubclass(tool, Retrieval):\n # check that tools name is not already in tools\n for t in self.tools:\n if issubclass(t, Retrieval):\n return\n self.tools.append(tool)\n elif issubclass(tool, CodeInterpreter):\n for t in self.tools:\n if issubclass(t, Retrieval):\n return\n self.tools.append(tool)\n elif issubclass(tool, BaseTool):\n for t in self.tools:\n if t.__name__ == tool.__name__:\n self.tools.remove(t)\n self.tools.append(tool)\n else:\n raise Exception(\"Invalid tool type.\")\n\n def add_instructions(self, instructions: str):\n if self._shared_instructions is None:\n self._shared_instructions = instructions\n else:\n self.instructions = self.instructions.replace(self._shared_instructions, \"\")\n self.instructions = self.instructions.strip().strip(\"\\n\")\n self._shared_instructions = instructions\n\n self.instructions = self._shared_instructions + \"\\n\\n\" + self.instructions\n\n def get_oai_tools(self):\n tools = []\n for tool in self.tools:\n if not isinstance(tool, type):\n raise Exception(\"Tool must not be initialized.\")\n\n if issubclass(tool, Retrieval):\n tools.append(tool().model_dump())\n elif issubclass(tool, CodeInterpreter):\n tools.append(tool().model_dump())\n elif issubclass(tool, BaseTool):\n tools.append({\n \"type\": \"function\",\n \"function\": tool.openai_schema\n })\n else:\n raise Exception(\"Invalid tool type.\")\n return tools\n\n def delete_assistant(self):\n self.client.beta.assistants.delete(self.id)\n self._delete_settings()\n\n def _delete_settings(self):\n path = self.get_settings_path()\n # check if settings.json exists\n if os.path.isfile(path):\n settings = []\n with open(path, 'r') as f:\n settings = json.load(f)\n for i, assistant_settings in enumerate(settings):\n if assistant_settings['id'] == self.id:\n settings.pop(i)\n break\n with open(path, 'w') as f:\n json.dump(settings, f, indent=4)" }, { "identifier": "Thread", "path": "agency_swarm/threads/thread.py", "snippet": "class Thread:\n id: str\n thread = None\n run = None\n\n def __init__(self, agent: Literal[Agent, User], recipient_agent: Agent):\n self.agent = agent\n self.recipient_agent = recipient_agent\n self.client = get_openai_client()\n\n def get_completion(self, message: str, yield_messages=True):\n if not self.thread:\n 
self.thread = self.client.beta.threads.create()\n self.id = self.thread.id\n\n # send message\n self.client.beta.threads.messages.create(\n thread_id=self.thread.id,\n role=\"user\",\n content=message\n )\n\n if yield_messages:\n yield MessageOutput(\"text\", self.agent.name, self.recipient_agent.name, message)\n\n # create run\n self.run = self.client.beta.threads.runs.create(\n thread_id=self.thread.id,\n assistant_id=self.recipient_agent.id,\n )\n\n while True:\n # wait until run completes\n while self.run.status in ['queued', 'in_progress']:\n time.sleep(0.5)\n self.run = self.client.beta.threads.runs.retrieve(\n thread_id=self.thread.id,\n run_id=self.run.id\n )\n\n # function execution\n if self.run.status == \"requires_action\":\n tool_calls = self.run.required_action.submit_tool_outputs.tool_calls\n tool_outputs = []\n for tool_call in tool_calls:\n if yield_messages:\n yield MessageOutput(\"function\", self.recipient_agent.name, self.agent.name, str(tool_call.function))\n\n output = self._execute_tool(tool_call)\n if inspect.isgenerator(output):\n try:\n while True:\n item = next(output)\n if isinstance(item, MessageOutput) and yield_messages:\n yield item\n except StopIteration as e:\n output = e.value\n else:\n if yield_messages:\n yield MessageOutput(\"function_output\", tool_call.function.name, self.recipient_agent.name, output)\n\n tool_outputs.append({\"tool_call_id\": tool_call.id, \"output\": str(output)})\n\n # submit tool outputs\n self.run = self.client.beta.threads.runs.submit_tool_outputs(\n thread_id=self.thread.id,\n run_id=self.run.id,\n tool_outputs=tool_outputs\n )\n # error\n elif self.run.status == \"failed\":\n raise Exception(\"Run Failed. Error: \", self.run.last_error)\n # return assistant message\n else:\n messages = self.client.beta.threads.messages.list(\n thread_id=self.id\n )\n message = messages.data[0].content[0].text.value\n\n if yield_messages:\n yield MessageOutput(\"text\", self.recipient_agent.name, self.agent.name, message)\n\n return message\n\n def _execute_tool(self, tool_call):\n funcs = self.recipient_agent.functions\n func = next(iter([func for func in funcs if func.__name__ == tool_call.function.name]))\n\n if not func:\n return f\"Error: Function {tool_call.function.name} not found. Available functions: {[func.__name__ for func in funcs]}\"\n\n try:\n # init tool\n func = func(**eval(tool_call.function.arguments))\n # get outputs from the tool\n output = func.run()\n\n return output\n except Exception as e:\n return \"Error: \" + str(e)" }, { "identifier": "BaseTool", "path": "agency_swarm/tools/base_tool.py", "snippet": "class BaseTool(OpenAISchema, ABC):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n @abstractmethod\n def run(self, **kwargs):\n pass" }, { "identifier": "User", "path": "agency_swarm/user/user.py", "snippet": "class User:\n name: str = \"User\"\n\n def __init__(self, name: str = None):\n # later, we can add more attributes to the user like bio, etc\n pass" } ]
import inspect import os import uuid import gradio as gr from enum import Enum from typing import List from pydantic import Field, field_validator from agency_swarm.agents import Agent from agency_swarm.threads import Thread from agency_swarm.tools import BaseTool from agency_swarm.user import User
5,343
self._read_instructions(os.path.join(self.get_class_folder_path(), shared_instructions)) elif os.path.isfile(shared_instructions): self._read_instructions(shared_instructions) else: self.shared_instructions = shared_instructions self._parse_agency_chart(agency_chart) self._create_send_message_tools() self._init_agents() self._init_threads() self.user = User() self.main_thread = Thread(self.user, self.ceo) def get_completion(self, message: str, yield_messages=True): """ Retrieves the completion for a given message from the main thread. Parameters: message (str): The message for which completion is to be retrieved. yield_messages (bool, optional): Flag to determine if intermediate messages should be yielded. Defaults to True. Returns: Generator or final response: Depending on the 'yield_messages' flag, this method returns either a generator yielding intermediate messages or the final response from the main thread. """ gen = self.main_thread.get_completion(message=message, yield_messages=yield_messages) if not yield_messages: while True: try: next(gen) except StopIteration as e: return e.value return gen def demo_gradio(self, height=600): """ Launches a Gradio-based demo interface for the agency chatbot. Parameters: height (int, optional): The height of the chatbot widget in the Gradio interface. Default is 600. This method sets up and runs a Gradio interface, allowing users to interact with the agency's chatbot. It includes a text input for the user's messages and a chatbot interface for displaying the conversation. The method handles user input and chatbot responses, updating the interface dynamically. """ try: except ImportError: raise Exception("Please install gradio: pip install gradio") with gr.Blocks() as demo: chatbot = gr.Chatbot(height=height) msg = gr.Textbox() def user(user_message, history): # Append the user message with a placeholder for bot response user_message = "👤 User: " + user_message.strip() return "", history + [[user_message, None]] def bot(history): # Replace this with your actual chatbot logic gen = self.get_completion(message=history[-1][0]) try: # Yield each message from the generator for bot_message in gen: if bot_message.sender_name.lower() == "user": continue message = bot_message.get_sender_emoji() + " " + bot_message.get_formatted_content() history.append((None, message)) yield history except StopIteration: # Handle the end of the conversation if necessary pass # Chain the events msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then( bot, chatbot, chatbot ) # Enable queuing for streaming intermediate outputs demo.queue() # Launch the demo demo.launch() def run_demo(self): """ Runs a demonstration of the agency's capabilities in an interactive command line interface. This function continuously prompts the user for input and displays responses from the agency's main thread. It leverages the generator pattern for asynchronous message processing. Output: Outputs the responses from the agency's main thread to the command line. """ while True: text = input("USER: ") try: gen = self.main_thread.get_completion(message=text) while True: message = next(gen) message.cprint() except StopIteration as e: pass def _parse_agency_chart(self, agency_chart): """ Parses the provided agency chart to initialize and organize agents within the agency. Parameters: agency_chart: A structure representing the hierarchical organization of agents within the agency. It can contain Agent objects and lists of Agent objects. 
This method iterates through each node in the agency chart. If a node is an Agent, it is set as the CEO if not already assigned. If a node is a list, it iterates through the agents in the list, adding them to the agency and establishing communication threads between them. It raises an exception if the agency chart is invalid or if multiple CEOs are defined. """ for node in agency_chart:
class Agency: def __init__(self, agency_chart, shared_instructions=""): """ Initializes the Agency object, setting up agents, threads, and core functionalities. Parameters: agency_chart: The structure defining the hierarchy and interaction of agents within the agency. shared_instructions (str, optional): A path to a file containing shared instructions for all agents. Defaults to an empty string. This constructor initializes various components of the Agency, including CEO, agents, threads, and user interactions. It parses the agency chart to set up the organizational structure and initializes the messaging tools, agents, and threads necessary for the operation of the agency. Additionally, it prepares a main thread for user interactions. """ self.ceo = None self.agents = [] self.agents_and_threads = {} if os.path.isfile(os.path.join(self.get_class_folder_path(), shared_instructions)): self._read_instructions(os.path.join(self.get_class_folder_path(), shared_instructions)) elif os.path.isfile(shared_instructions): self._read_instructions(shared_instructions) else: self.shared_instructions = shared_instructions self._parse_agency_chart(agency_chart) self._create_send_message_tools() self._init_agents() self._init_threads() self.user = User() self.main_thread = Thread(self.user, self.ceo) def get_completion(self, message: str, yield_messages=True): """ Retrieves the completion for a given message from the main thread. Parameters: message (str): The message for which completion is to be retrieved. yield_messages (bool, optional): Flag to determine if intermediate messages should be yielded. Defaults to True. Returns: Generator or final response: Depending on the 'yield_messages' flag, this method returns either a generator yielding intermediate messages or the final response from the main thread. """ gen = self.main_thread.get_completion(message=message, yield_messages=yield_messages) if not yield_messages: while True: try: next(gen) except StopIteration as e: return e.value return gen def demo_gradio(self, height=600): """ Launches a Gradio-based demo interface for the agency chatbot. Parameters: height (int, optional): The height of the chatbot widget in the Gradio interface. Default is 600. This method sets up and runs a Gradio interface, allowing users to interact with the agency's chatbot. It includes a text input for the user's messages and a chatbot interface for displaying the conversation. The method handles user input and chatbot responses, updating the interface dynamically. 
""" try: except ImportError: raise Exception("Please install gradio: pip install gradio") with gr.Blocks() as demo: chatbot = gr.Chatbot(height=height) msg = gr.Textbox() def user(user_message, history): # Append the user message with a placeholder for bot response user_message = "👤 User: " + user_message.strip() return "", history + [[user_message, None]] def bot(history): # Replace this with your actual chatbot logic gen = self.get_completion(message=history[-1][0]) try: # Yield each message from the generator for bot_message in gen: if bot_message.sender_name.lower() == "user": continue message = bot_message.get_sender_emoji() + " " + bot_message.get_formatted_content() history.append((None, message)) yield history except StopIteration: # Handle the end of the conversation if necessary pass # Chain the events msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then( bot, chatbot, chatbot ) # Enable queuing for streaming intermediate outputs demo.queue() # Launch the demo demo.launch() def run_demo(self): """ Runs a demonstration of the agency's capabilities in an interactive command line interface. This function continuously prompts the user for input and displays responses from the agency's main thread. It leverages the generator pattern for asynchronous message processing. Output: Outputs the responses from the agency's main thread to the command line. """ while True: text = input("USER: ") try: gen = self.main_thread.get_completion(message=text) while True: message = next(gen) message.cprint() except StopIteration as e: pass def _parse_agency_chart(self, agency_chart): """ Parses the provided agency chart to initialize and organize agents within the agency. Parameters: agency_chart: A structure representing the hierarchical organization of agents within the agency. It can contain Agent objects and lists of Agent objects. This method iterates through each node in the agency chart. If a node is an Agent, it is set as the CEO if not already assigned. If a node is a list, it iterates through the agents in the list, adding them to the agency and establishing communication threads between them. It raises an exception if the agency chart is invalid or if multiple CEOs are defined. """ for node in agency_chart:
if isinstance(node, Agent):
0
2023-12-14 01:40:32+00:00
8k
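The `get_completion` method in the agency code of the record above serves two kinds of callers: with `yield_messages=True` it hands back the generator itself, and with `yield_messages=False` it drains the generator and returns the value carried by `StopIteration`. Below is a minimal, self-contained sketch of that Python idiom; the function names (`stream_messages`, `get_final`) are illustrative and not part of agency-swarm.

```python
def stream_messages():
    # Stand-in for Thread.get_completion: yields intermediate messages,
    # then returns the final reply (which becomes StopIteration.value).
    yield "function: SendMessage(...)"
    yield "function_output: task queued"
    return "Here is the final reply."

def get_final(gen):
    """Consume a generator, discarding yielded items, and return its return value."""
    while True:
        try:
            next(gen)
        except StopIteration as stop:
            return stop.value

print(get_final(stream_messages()))  # -> "Here is the final reply."
```

The same method can therefore back both a streaming caller (the Gradio demo iterates over the yielded messages) and a blocking caller that only wants the final answer.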
Deltares/imod-python
imod/mf6/evt.py
[ { "identifier": "add_periodic_auxiliary_variable", "path": "imod/mf6/auxiliary_variables.py", "snippet": "def add_periodic_auxiliary_variable(package):\n if hasattr(package, \"_auxiliary_data\"):\n for aux_var_name, aux_var_dimensions in package._auxiliary_data.items():\n aux_coords = package.dataset[aux_var_name].coords[aux_var_dimensions].values\n for s in aux_coords:\n package.dataset[s] = package.dataset[aux_var_name].sel(\n {aux_var_dimensions: s}\n )" }, { "identifier": "BoundaryCondition", "path": "imod/mf6/boundary_condition.py", "snippet": "class BoundaryCondition(Package, abc.ABC):\n \"\"\"\n BoundaryCondition is used to share methods for specific stress packages\n with a time component.\n\n It is not meant to be used directly, only to inherit from, to implement new\n packages.\n\n This class only supports `list input\n <https://water.usgs.gov/water-resources/software/MODFLOW-6/mf6io_6.0.4.pdf#page=19>`_,\n not the array input which is used in :class:`Package`.\n \"\"\"\n\n def _max_active_n(self):\n \"\"\"\n Determine the maximum active number of cells that are active\n during a stress period.\n \"\"\"\n da = self.dataset[self.get_period_varnames()[0]]\n if \"time\" in da.coords:\n nmax = int(da.groupby(\"time\").count(xr.ALL_DIMS).max())\n else:\n nmax = int(da.count())\n return nmax\n\n def _write_binaryfile(self, outpath, struct_array):\n with open(outpath, \"w\") as f:\n struct_array.tofile(f)\n\n def _write_textfile(self, outpath, struct_array):\n fields = struct_array.dtype.fields\n fmt = [self._number_format(field[0]) for field in fields.values()]\n header = \" \".join(list(fields.keys()))\n with open(outpath, \"w\") as f:\n np.savetxt(fname=f, X=struct_array, fmt=fmt, header=header)\n\n def _write_datafile(self, outpath, ds, binary):\n \"\"\"\n Writes a modflow6 binary data file\n \"\"\"\n layer = ds[\"layer\"].values if \"layer\" in ds.coords else None\n arrdict = self._ds_to_arrdict(ds)\n struct_array = self._to_struct_array(arrdict, layer)\n outpath.parent.mkdir(exist_ok=True, parents=True)\n if binary:\n self._write_binaryfile(outpath, struct_array)\n else:\n self._write_textfile(outpath, struct_array)\n\n def _ds_to_arrdict(self, ds):\n for datavar in ds.data_vars:\n if ds[datavar].shape == ():\n raise ValueError(\n f\"{datavar} in {self._pkg_id} package cannot be a scalar\"\n )\n\n arrdict = {}\n for datavar in ds.data_vars:\n arrdict[datavar] = ds[datavar].values\n\n return arrdict\n\n def _to_struct_array(self, arrdict, layer):\n \"\"\"Convert from dense arrays to list based input\"\"\"\n # TODO stream the data per stress period\n # TODO add pkgcheck that period table aligns\n # Get the number of valid values\n if layer is None:\n raise ValueError(\"Layer should be provided\")\n\n data = next(iter(arrdict.values()))\n notnull = ~np.isnan(data)\n\n if isinstance(self.dataset, xr.Dataset):\n recarr = _dis_recarr(arrdict, layer, notnull)\n elif isinstance(self.dataset, xu.UgridDataset):\n recarr = _disv_recarr(arrdict, layer, notnull)\n else:\n raise TypeError(\n \"self.dataset should be xarray.Dataset or xugrid.UgridDataset,\"\n f\" is {type(self.dataset)} instead\"\n )\n # Fill in the data\n for key, arr in arrdict.items():\n values = arr[notnull].astype(np.float64)\n recarr[key] = values\n\n return recarr\n\n def _period_paths(self, directory, pkgname, globaltimes, bin_ds, binary):\n directory = pathlib.Path(directory) / pkgname\n\n if binary:\n ext = \"bin\"\n else:\n ext = \"dat\"\n\n periods = {}\n if \"time\" in bin_ds: # one of bin_ds has time\n 
package_times = bin_ds.coords[\"time\"].values\n starts = np.searchsorted(globaltimes, package_times) + 1\n for i, start in enumerate(starts):\n path = directory / f\"{self._pkg_id}-{i}.{ext}\"\n periods[start] = path.as_posix()\n\n repeat_stress = self.dataset.get(\"repeat_stress\")\n if repeat_stress is not None and repeat_stress.values[()] is not None:\n keys = repeat_stress.isel(repeat_items=0).values\n values = repeat_stress.isel(repeat_items=1).values\n repeat_starts = np.searchsorted(globaltimes, keys) + 1\n values_index = np.searchsorted(globaltimes, values) + 1\n for i, start in zip(values_index, repeat_starts):\n periods[start] = periods[i]\n # Now make sure the periods are sorted by key.\n periods = dict(sorted(periods.items()))\n else:\n path = directory / f\"{self._pkg_id}.{ext}\"\n periods[1] = path.as_posix()\n\n return periods\n\n def _get_options(self, predefined_options: Dict, not_options: List = None):\n options = copy(predefined_options)\n\n if not_options is None:\n not_options = self.get_period_varnames()\n\n for varname in self.dataset.data_vars.keys(): # pylint:disable=no-member\n if varname in not_options:\n continue\n v = self.dataset[varname].values[()]\n if self._valid(v): # skip None and False\n options[varname] = v\n return options\n\n def _get_bin_ds(self):\n \"\"\"\n Get binary dataset data for stress periods, this data will be written to\n datafiles. This method can be overriden to do some extra operations on\n this dataset before writing.\n \"\"\"\n return self[self.get_period_varnames()]\n\n def render(self, directory, pkgname, globaltimes, binary):\n \"\"\"Render fills in the template only, doesn't write binary data\"\"\"\n d = {\"binary\": binary}\n bin_ds = self._get_bin_ds()\n d[\"periods\"] = self._period_paths(\n directory, pkgname, globaltimes, bin_ds, binary\n )\n # construct the rest (dict for render)\n d = self._get_options(d)\n d[\"maxbound\"] = self._max_active_n()\n\n if (hasattr(self, \"_auxiliary_data\")) and (names := get_variable_names(self)):\n d[\"auxiliary\"] = names\n\n return self._template.render(d)\n\n def _write_perioddata(self, directory, pkgname, binary):\n if len(self.get_period_varnames()) == 0:\n return\n bin_ds = self._get_bin_ds()\n\n if binary:\n ext = \"bin\"\n else:\n ext = \"dat\"\n\n if \"time\" in bin_ds: # one of bin_ds has time\n for i in range(len(self.dataset.time)):\n path = directory / pkgname / f\"{self._pkg_id}-{i}.{ext}\"\n self._write_datafile(\n path, bin_ds.isel(time=i), binary=binary\n ) # one timestep\n else:\n path = directory / pkgname / f\"{self._pkg_id}.{ext}\"\n self._write_datafile(path, bin_ds, binary=binary)\n\n def write(self, pkgname: str, globaltimes: np.ndarray, write_context: WriteContext):\n \"\"\"\n writes the blockfile and binary data\n\n directory is modelname\n \"\"\"\n\n super().write(pkgname, globaltimes, write_context)\n directory = write_context.write_directory\n\n self._write_perioddata(\n directory=directory,\n pkgname=pkgname,\n binary=write_context.use_binary,\n )\n\n def get_period_varnames(self):\n result = []\n if hasattr(self, \"_period_data\"):\n result.extend(self._period_data)\n if hasattr(self, \"_auxiliary_data\"):\n result.extend(get_variable_names(self))\n\n return result" }, { "identifier": "RegridderType", "path": "imod/mf6/regridding_utils.py", "snippet": "class RegridderType(Enum):\n \"\"\"\n Enumerator referring to regridder types in ``xugrid``.\n These can be used safely in scripts, remaining backwards compatible for\n when it is decided to rename regridders in 
``xugrid``. For an explanation\n what each regridder type does, we refer to the `xugrid documentation <https://deltares.github.io/xugrid/examples/regridder_overview.html>`_\n \"\"\"\n\n CENTROIDLOCATOR = xu.CentroidLocatorRegridder\n BARYCENTRIC = xu.BarycentricInterpolator\n OVERLAP = xu.OverlapRegridder\n RELATIVEOVERLAP = xu.RelativeOverlapRegridder" }, { "identifier": "BOUNDARY_DIMS_SCHEMA", "path": "imod/mf6/validation.py", "snippet": "BOUNDARY_DIMS_SCHEMA = (\n DimsSchema(\"time\", \"layer\", \"y\", \"x\")\n | DimsSchema(\"layer\", \"y\", \"x\")\n | DimsSchema(\"time\", \"layer\", \"{face_dim}\")\n | DimsSchema(\"layer\", \"{face_dim}\")\n # Layer dim not necessary, as long as there is a layer coordinate present.\n | DimsSchema(\"time\", \"y\", \"x\")\n | DimsSchema(\"y\", \"x\")\n | DimsSchema(\"time\", \"{face_dim}\")\n | DimsSchema(\"{face_dim}\")\n)" }, { "identifier": "AllInsideNoDataSchema", "path": "imod/schemata.py", "snippet": "class AllInsideNoDataSchema(NoDataComparisonSchema):\n \"\"\"\n Checks that all notnull values all occur within the notnull values of other.\n \"\"\"\n\n def validate(self, obj: Union[xr.DataArray, xu.UgridDataArray], **kwargs):\n other_obj = kwargs[self.other]\n valid = self.is_notnull(obj)\n other_valid = self.is_other_notnull(other_obj)\n\n if (valid & ~other_valid).any():\n raise ValidationError(f\"data values found at nodata values of {self.other}\")" }, { "identifier": "AllNoDataSchema", "path": "imod/schemata.py", "snippet": "class AllNoDataSchema(NoDataSchema):\n \"\"\"\n Fails when all data is NoData.\n \"\"\"\n\n def validate(self, obj: Union[xr.DataArray, xu.UgridDataArray], **kwargs):\n valid = self.is_notnull(obj)\n if ~valid.any():\n raise ValidationError(\"all nodata\")" }, { "identifier": "AllValueSchema", "path": "imod/schemata.py", "snippet": "class AllValueSchema(ValueSchema):\n \"\"\"\n Validate whether all values pass a condition.\n\n E.g. if operator is \">\":\n\n assert (values > threshold).all()\n \"\"\"\n\n def validate(self, obj: Union[xr.DataArray, xu.UgridDataArray], **kwargs):\n if isinstance(self.other, str):\n other_obj = kwargs[self.other]\n else:\n other_obj = self.other\n\n if scalar_None(obj) or scalar_None(other_obj):\n return\n\n explicitly_ignored = self.get_explicitly_ignored(kwargs)\n\n ignore = (\n np.isnan(obj) | np.isnan(other_obj) | explicitly_ignored\n ) # ignore nan by setting to True\n\n condition = self.operator(obj, other_obj)\n condition = condition | ignore\n if not condition.all():\n raise ValidationError(\n f\"not all values comply with criterion: {self.operator_str} {self.other}\"\n )" }, { "identifier": "CoordsSchema", "path": "imod/schemata.py", "snippet": "class CoordsSchema(BaseSchema):\n \"\"\"\n Validate presence of coords.\n\n Parameters\n ----------\n coords : dict_like\n coords of the DataArray. 
`None` may be used as a wildcard value.\n \"\"\"\n\n def __init__(\n self,\n coords: Tuple[str],\n require_all_keys: bool = True,\n allow_extra_keys: bool = True,\n ) -> None:\n self.coords = coords\n self.require_all_keys = require_all_keys\n self.allow_extra_keys = allow_extra_keys\n\n def validate(self, obj: xr.DataArray, **kwargs) -> None:\n coords = list(obj.coords.keys())\n\n if self.require_all_keys:\n missing_keys = set(self.coords) - set(coords)\n if missing_keys:\n raise ValidationError(f\"coords has missing keys: {missing_keys}\")\n\n if not self.allow_extra_keys:\n extra_keys = set(coords) - set(self.coords)\n if extra_keys:\n raise ValidationError(f\"coords has extra keys: {extra_keys}\")\n\n for key in self.coords:\n if key not in coords:\n raise ValidationError(f\"key {key} not in coords\")" }, { "identifier": "DimsSchema", "path": "imod/schemata.py", "snippet": "class DimsSchema(BaseSchema):\n def __init__(self, *dims: DimsT) -> None:\n self.dims = dims\n\n def _fill_in_face_dim(self, obj: Union[xr.DataArray, xu.UgridDataArray]):\n \"\"\"\n Return dims with a filled in face dim if necessary.\n \"\"\"\n if \"{face_dim}\" in self.dims and isinstance(obj, xu.UgridDataArray):\n return tuple(\n (\n obj.ugrid.grid.face_dimension if i == \"{face_dim}\" else i\n for i in self.dims\n )\n )\n elif \"{edge_dim}\" in self.dims and isinstance(obj, xu.UgridDataArray):\n return tuple(\n (\n obj.ugrid.grid.edge_dimension if i == \"{edge_dim}\" else i\n for i in self.dims\n )\n )\n else:\n return self.dims\n\n def validate(self, obj: Union[xr.DataArray, xu.UgridDataArray], **kwargs) -> None:\n \"\"\"Validate dimensions\n Parameters\n ----------\n dims : Tuple[Union[str, None]]\n Dimensions of the DataArray. `None` may be used as a wildcard value.\n \"\"\"\n dims = self._fill_in_face_dim(obj)\n # Force to tuple for error message print\n expected = tuple(dims)\n actual = tuple(obj.dims)\n if actual != expected:\n raise ValidationError(f\"dim mismatch: expected {expected}, got {actual}\")" }, { "identifier": "DTypeSchema", "path": "imod/schemata.py", "snippet": "class DTypeSchema(BaseSchema):\n def __init__(self, dtype: DTypeLike) -> None:\n if dtype in [\n np.floating,\n np.integer,\n np.signedinteger,\n np.unsignedinteger,\n np.generic,\n ]:\n self.dtype = dtype\n else:\n self.dtype = np.dtype(dtype)\n\n def validate(self, obj: xr.DataArray, **kwargs) -> None:\n \"\"\"\n Validate dtype\n\n Parameters\n ----------\n dtype : Any\n Dtype of the DataArray.\n \"\"\"\n if scalar_None(obj):\n return\n\n if not np.issubdtype(obj.dtype, self.dtype):\n raise ValidationError(f\"dtype {obj.dtype} != {self.dtype}\")" }, { "identifier": "IdentityNoDataSchema", "path": "imod/schemata.py", "snippet": "class IdentityNoDataSchema(NoDataComparisonSchema):\n \"\"\"\n Checks that the NoData values are located at exactly the same locations.\n\n Tests only if if all dimensions of the other object are present in the\n object. 
So tests if \"stage\" with `{time, layer, y, x}` compared to \"idomain\"\n `{layer, y, x}` but doesn't test if \"k\" with `{layer}` is comperated to\n \"idomain\" `{layer, y, x}`\n \"\"\"\n\n def validate(self, obj: Union[xr.DataArray, xu.UgridDataArray], **kwargs):\n other_obj = kwargs[self.other]\n\n # Only test if object has all dimensions in other object.\n missing_dims = set(other_obj.dims) - set(obj.dims)\n\n if len(missing_dims) == 0:\n valid = self.is_notnull(obj)\n other_valid = self.is_other_notnull(other_obj)\n if (valid ^ other_valid).any():\n raise ValidationError(f\"nodata is not aligned with {self.other}\")" }, { "identifier": "IndexesSchema", "path": "imod/schemata.py", "snippet": "class IndexesSchema(EmptyIndexesSchema):\n \"\"\"\n Verify indexes, check if no dims with zero size are included and that\n indexes are monotonic. Skips unstructured grid dimensions.\n \"\"\"\n\n def __init__(self) -> None:\n pass\n\n def validate(self, obj: Union[xr.DataArray, xu.UgridDataArray], **kwargs) -> None:\n # Test if indexes all empty\n super().validate(obj)\n\n dims_to_validate = self.get_dims_to_validate(obj)\n\n for dim in dims_to_validate:\n if dim == \"y\":\n if not obj.indexes[dim].is_monotonic_decreasing:\n raise ValidationError(\n f\"coord {dim} which is not monotonically decreasing\"\n )\n\n else:\n if not obj.indexes[dim].is_monotonic_increasing:\n raise ValidationError(\n f\"coord {dim} which is not monotonically increasing\"\n )" }, { "identifier": "OtherCoordsSchema", "path": "imod/schemata.py", "snippet": "class OtherCoordsSchema(BaseSchema):\n \"\"\"\n Validate whether coordinates match those of other.\n \"\"\"\n\n def __init__(\n self,\n other: str,\n require_all_keys: bool = True,\n allow_extra_keys: bool = True,\n ):\n self.other = other\n self.require_all_keys = require_all_keys\n self.allow_extra_keys = allow_extra_keys\n\n def validate(self, obj: Union[xr.DataArray, xu.UgridDataArray], **kwargs):\n other_obj = kwargs[self.other]\n other_coords = list(other_obj.coords.keys())\n return CoordsSchema(\n other_coords,\n self.require_all_keys,\n self.allow_extra_keys,\n ).validate(obj)" }, { "identifier": "unstack_dim_into_variable", "path": "imod/util.py", "snippet": "def unstack_dim_into_variable(\n dataset: Union[xr.Dataset, xu.UgridDataset], dim: str\n) -> Union[xr.Dataset, xu.UgridDataset]:\n \"\"\"\n Unstack each variable containing ``dim`` into separate variables.\n \"\"\"\n unstacked = dataset.copy()\n\n variables_containing_dim = [\n variable for variable in dataset.data_vars if dim in dataset[variable].dims\n ]\n\n for variable in variables_containing_dim:\n stacked = unstacked[variable]\n unstacked = unstacked.drop_vars(variable)\n for index in stacked[dim].values:\n unstacked[f\"{variable}_{dim}_{index}\"] = stacked.sel(\n indexers={dim: index}, drop=True\n )\n if dim in unstacked.coords:\n unstacked = unstacked.drop_vars(dim)\n return unstacked" } ]
from typing import Dict, List from imod.mf6.auxiliary_variables import add_periodic_auxiliary_variable from imod.mf6.boundary_condition import BoundaryCondition from imod.mf6.regridding_utils import RegridderType from imod.mf6.validation import BOUNDARY_DIMS_SCHEMA from imod.schemata import ( AllInsideNoDataSchema, AllNoDataSchema, AllValueSchema, CoordsSchema, DimsSchema, DTypeSchema, IdentityNoDataSchema, IndexesSchema, OtherCoordsSchema, ) from imod.util import unstack_dim_into_variable import numpy as np
6,021
| DimsSchema("segment", "time", "layer", "{face_dim}") | DimsSchema("segment", "layer", "{face_dim}") # Layer dim not necessary, as long as there is a layer coordinate present. | DimsSchema("segment", "time", "y", "x") | DimsSchema("segment", "y", "x") | DimsSchema("segment", "time", "{face_dim}") | DimsSchema("segment", "{face_dim}") ) class Evapotranspiration(BoundaryCondition): """ Evapotranspiration (EVT) Package. Any number of EVT Packages can be specified for a single groundwater flow model. All single-valued variables are free format. https://water.usgs.gov/water-resources/software/MODFLOW-6/mf6io_6.0.4.pdf#page=86 Parameters ---------- surface: array of floats (xr.DataArray) is the elevation of the ET surface (L). A time-series name may be specified. rate: array of floats (xr.DataArray) is the maximum ET flux rate (LT −1). A time-series name may be specified. depth: array of floats (xr.DataArray) is the ET extinction depth (L). A time-series name may be specified. proportion_rate: array of floats (xr.DataArray) is the proportion of the maximum ET flux rate at the bottom of a segment (dimensionless). A time-series name may be specified. (petm) proportion_depth: array of floats (xr.DataArray) is the proportion of the ET extinction depth at the bottom of a segment (dimensionless). A timeseries name may be specified. (pxdp) concentration: array of floats (xr.DataArray, optional) if this flow package is used in simulations also involving transport, then this array is used as the concentration for inflow over this boundary. concentration_boundary_type: ({"AUX", "AUXMIXED"}, optional) if this flow package is used in simulations also involving transport, then this keyword specifies how outflow over this boundary is computed. fixed_cell: array of floats (xr.DataArray) indicates that evapotranspiration will not be reassigned to a cell underlying the cell specified in the list if the specified cell is inactive. print_input: ({True, False}, optional) keyword to indicate that the list of evapotranspiration information will be written to the listing file immediately after it is read. Default is False. print_flows: ({True, False}, optional) Indicates that the list of evapotranspiration flow rates will be printed to the listing file for every stress period time step in which "BUDGET PRINT" is specified in Output Control. If there is no Output Control option and PRINT FLOWS is specified, then flow rates are printed for the last time step of each stress period. Default is False. save_flows: ({True, False}, optional) Indicates that evapotranspiration flow terms will be written to the file specified with "BUDGET FILEOUT" in Output Control. Default is False. observations: [Not yet supported.] Default is None. validate: {True, False} Flag to indicate whether the package should be validated upon initialization. This raises a ValidationError if package input is provided in the wrong manner. Defaults to True. repeat_stress: Optional[xr.DataArray] of datetimes Used to repeat data for e.g. repeating stress periods such as seasonality without duplicating the values. The DataArray should have dimensions ``("repeat", "repeat_items")``. The ``repeat_items`` dimension should have size 2: the first value is the "key", the second value is the "value". For the "key" datetime, the data of the "value" datetime will be used. Can also be set with a dictionary using the ``set_repeat_stress`` method. 
""" _pkg_id = "evt" _init_schemata = { "surface": [ DTypeSchema(np.floating), IndexesSchema(), CoordsSchema(("layer",)), BOUNDARY_DIMS_SCHEMA, ], "rate": [ DTypeSchema(np.floating), IndexesSchema(), CoordsSchema(("layer",)), BOUNDARY_DIMS_SCHEMA, ], "depth": [ DTypeSchema(np.floating), IndexesSchema(), CoordsSchema(("layer",)), BOUNDARY_DIMS_SCHEMA, ], "proportion_rate": [ DTypeSchema(np.floating), IndexesSchema(), CoordsSchema(("layer",)), SEGMENT_BOUNDARY_DIMS_SCHEMA, ], "proportion_depth": [ DTypeSchema(np.floating), IndexesSchema(), CoordsSchema(("layer",)), SEGMENT_BOUNDARY_DIMS_SCHEMA, ], "print_flows": [DTypeSchema(np.bool_), DimsSchema()], "save_flows": [DTypeSchema(np.bool_), DimsSchema()], } _write_schemata = { "surface": [ OtherCoordsSchema("idomain"), AllNoDataSchema(), # Check for all nan, can occur while clipping AllInsideNoDataSchema(other="idomain", is_other_notnull=(">", 0)), ], "rate": [IdentityNoDataSchema("surface")], "depth": [IdentityNoDataSchema("surface")], "proportion_rate": [IdentityNoDataSchema("surface")], "proportion_depth": [ IdentityNoDataSchema("surface"),
SEGMENT_BOUNDARY_DIMS_SCHEMA = ( BOUNDARY_DIMS_SCHEMA | DimsSchema("segment", "time", "layer", "y", "x") | DimsSchema("segment", "layer", "y", "x") | DimsSchema("segment", "time", "layer", "{face_dim}") | DimsSchema("segment", "layer", "{face_dim}") # Layer dim not necessary, as long as there is a layer coordinate present. | DimsSchema("segment", "time", "y", "x") | DimsSchema("segment", "y", "x") | DimsSchema("segment", "time", "{face_dim}") | DimsSchema("segment", "{face_dim}") ) class Evapotranspiration(BoundaryCondition): """ Evapotranspiration (EVT) Package. Any number of EVT Packages can be specified for a single groundwater flow model. All single-valued variables are free format. https://water.usgs.gov/water-resources/software/MODFLOW-6/mf6io_6.0.4.pdf#page=86 Parameters ---------- surface: array of floats (xr.DataArray) is the elevation of the ET surface (L). A time-series name may be specified. rate: array of floats (xr.DataArray) is the maximum ET flux rate (LT −1). A time-series name may be specified. depth: array of floats (xr.DataArray) is the ET extinction depth (L). A time-series name may be specified. proportion_rate: array of floats (xr.DataArray) is the proportion of the maximum ET flux rate at the bottom of a segment (dimensionless). A time-series name may be specified. (petm) proportion_depth: array of floats (xr.DataArray) is the proportion of the ET extinction depth at the bottom of a segment (dimensionless). A timeseries name may be specified. (pxdp) concentration: array of floats (xr.DataArray, optional) if this flow package is used in simulations also involving transport, then this array is used as the concentration for inflow over this boundary. concentration_boundary_type: ({"AUX", "AUXMIXED"}, optional) if this flow package is used in simulations also involving transport, then this keyword specifies how outflow over this boundary is computed. fixed_cell: array of floats (xr.DataArray) indicates that evapotranspiration will not be reassigned to a cell underlying the cell specified in the list if the specified cell is inactive. print_input: ({True, False}, optional) keyword to indicate that the list of evapotranspiration information will be written to the listing file immediately after it is read. Default is False. print_flows: ({True, False}, optional) Indicates that the list of evapotranspiration flow rates will be printed to the listing file for every stress period time step in which "BUDGET PRINT" is specified in Output Control. If there is no Output Control option and PRINT FLOWS is specified, then flow rates are printed for the last time step of each stress period. Default is False. save_flows: ({True, False}, optional) Indicates that evapotranspiration flow terms will be written to the file specified with "BUDGET FILEOUT" in Output Control. Default is False. observations: [Not yet supported.] Default is None. validate: {True, False} Flag to indicate whether the package should be validated upon initialization. This raises a ValidationError if package input is provided in the wrong manner. Defaults to True. repeat_stress: Optional[xr.DataArray] of datetimes Used to repeat data for e.g. repeating stress periods such as seasonality without duplicating the values. The DataArray should have dimensions ``("repeat", "repeat_items")``. The ``repeat_items`` dimension should have size 2: the first value is the "key", the second value is the "value". For the "key" datetime, the data of the "value" datetime will be used. 
Can also be set with a dictionary using the ``set_repeat_stress`` method. """ _pkg_id = "evt" _init_schemata = { "surface": [ DTypeSchema(np.floating), IndexesSchema(), CoordsSchema(("layer",)), BOUNDARY_DIMS_SCHEMA, ], "rate": [ DTypeSchema(np.floating), IndexesSchema(), CoordsSchema(("layer",)), BOUNDARY_DIMS_SCHEMA, ], "depth": [ DTypeSchema(np.floating), IndexesSchema(), CoordsSchema(("layer",)), BOUNDARY_DIMS_SCHEMA, ], "proportion_rate": [ DTypeSchema(np.floating), IndexesSchema(), CoordsSchema(("layer",)), SEGMENT_BOUNDARY_DIMS_SCHEMA, ], "proportion_depth": [ DTypeSchema(np.floating), IndexesSchema(), CoordsSchema(("layer",)), SEGMENT_BOUNDARY_DIMS_SCHEMA, ], "print_flows": [DTypeSchema(np.bool_), DimsSchema()], "save_flows": [DTypeSchema(np.bool_), DimsSchema()], } _write_schemata = { "surface": [ OtherCoordsSchema("idomain"), AllNoDataSchema(), # Check for all nan, can occur while clipping AllInsideNoDataSchema(other="idomain", is_other_notnull=(">", 0)), ], "rate": [IdentityNoDataSchema("surface")], "depth": [IdentityNoDataSchema("surface")], "proportion_rate": [IdentityNoDataSchema("surface")], "proportion_depth": [ IdentityNoDataSchema("surface"),
AllValueSchema(">=", 0.0),
6
2023-12-08 13:57:59+00:00
8k
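The EVT package in the record above declares its dimension validators by composing `DimsSchema` alternatives with `|` (for example `SEGMENT_BOUNDARY_DIMS_SCHEMA` extends `BOUNDARY_DIMS_SCHEMA` with segment-aware layouts), so a variable passes validation if any one layout matches. The snippet below is a rough, standalone sketch of that union-of-schemas pattern; the class names mirror the ones above, but this is not imod's actual implementation.

```python
class SchemaUnion:
    # Validation passes if any member schema accepts the dimensions.
    def __init__(self, *schemas):
        self.schemas = schemas

    def __or__(self, other):
        return SchemaUnion(*self.schemas, other)

    def validate(self, dims):
        errors = []
        for schema in self.schemas:
            try:
                return schema.validate(dims)  # first matching layout wins
            except ValueError as error:
                errors.append(str(error))
        raise ValueError("no layout matched: " + "; ".join(errors))


class DimsSchema:
    # Accepts exactly one dimension layout.
    def __init__(self, *dims):
        self.dims = tuple(dims)

    def __or__(self, other):
        return SchemaUnion(self, other)

    def validate(self, dims):
        if tuple(dims) != self.dims:
            raise ValueError(f"expected {self.dims}, got {tuple(dims)}")


# A boundary variable may be transient or steady-state, layered or planar:
schema = DimsSchema("time", "layer", "y", "x") | DimsSchema("layer", "y", "x") | DimsSchema("y", "x")
schema.validate(("layer", "y", "x"))  # accepted; an unknown layout would raise
```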
Dong142857/Live3DPortrait
models/eg3d/triplane.py
[ { "identifier": "persistence", "path": "torch_utils/persistence.py", "snippet": "def persistent_class(orig_class):\n def __init__(self, *args, **kwargs):\n def init_args(self):\n def init_kwargs(self):\n def __reduce__(self):\ndef is_persistent(obj):\ndef import_hook(hook):\ndef _reconstruct_persistent_obj(meta):\ndef _module_to_src(module):\ndef _src_to_module(src):\ndef _check_pickleable(obj):\n def recurse(obj):\n class Decorator(orig_class):" }, { "identifier": "Generator", "path": "models/eg3d/networks_stylegan2.py", "snippet": "class Generator(torch.nn.Module):\n def __init__(self,\n z_dim, # Input latent (Z) dimensionality.\n c_dim, # Conditioning label (C) dimensionality.\n w_dim, # Intermediate latent (W) dimensionality.\n img_resolution, # Output resolution.\n img_channels, # Number of output color channels.\n mapping_kwargs = {}, # Arguments for MappingNetwork.\n **synthesis_kwargs, # Arguments for SynthesisNetwork.\n ):\n super().__init__()\n self.z_dim = z_dim\n self.c_dim = c_dim\n self.w_dim = w_dim\n self.img_resolution = img_resolution\n self.img_channels = img_channels\n self.synthesis = SynthesisNetwork(w_dim=w_dim, img_resolution=img_resolution, img_channels=img_channels, **synthesis_kwargs)\n self.num_ws = self.synthesis.num_ws\n self.mapping = MappingNetwork(z_dim=z_dim, c_dim=c_dim, w_dim=w_dim, num_ws=self.num_ws, **mapping_kwargs)\n\n def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False, **synthesis_kwargs):\n ws = self.mapping(z, c, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff, update_emas=update_emas)\n img = self.synthesis(ws, update_emas=update_emas, **synthesis_kwargs)\n return img" }, { "identifier": "ImportanceRenderer", "path": "models/eg3d/volumetric_rendering/renderer.py", "snippet": "class ImportanceRenderer(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.ray_marcher = MipRayMarcher2()\n self.plane_axes = generate_planes()\n\n def forward(self, planes, decoder, ray_origins, ray_directions, rendering_options):\n self.plane_axes = self.plane_axes.to(ray_origins.device)\n\n if rendering_options['ray_start'] == rendering_options['ray_end'] == 'auto':\n ray_start, ray_end = math_utils.get_ray_limits_box(ray_origins, ray_directions, box_side_length=rendering_options['box_warp'])\n is_ray_valid = ray_end > ray_start\n if torch.any(is_ray_valid).item():\n ray_start[~is_ray_valid] = ray_start[is_ray_valid].min()\n ray_end[~is_ray_valid] = ray_start[is_ray_valid].max()\n depths_coarse = self.sample_stratified(ray_origins, ray_start, ray_end, rendering_options['depth_resolution'], rendering_options['disparity_space_sampling'])\n else:\n # Create stratified depth samples\n depths_coarse = self.sample_stratified(ray_origins, rendering_options['ray_start'], rendering_options['ray_end'], rendering_options['depth_resolution'], rendering_options['disparity_space_sampling'])\n\n batch_size, num_rays, samples_per_ray, _ = depths_coarse.shape\n\n # Coarse Pass\n sample_coordinates = (ray_origins.unsqueeze(-2) + depths_coarse * ray_directions.unsqueeze(-2)).reshape(batch_size, -1, 3)\n sample_directions = ray_directions.unsqueeze(-2).expand(-1, -1, samples_per_ray, -1).reshape(batch_size, -1, 3)\n\n\n out = self.run_model(planes, decoder, sample_coordinates, sample_directions, rendering_options)\n colors_coarse = out['rgb']\n densities_coarse = out['sigma']\n colors_coarse = colors_coarse.reshape(batch_size, num_rays, samples_per_ray, colors_coarse.shape[-1])\n densities_coarse = 
densities_coarse.reshape(batch_size, num_rays, samples_per_ray, 1)\n\n # Fine Pass\n N_importance = rendering_options['depth_resolution_importance']\n if N_importance > 0:\n _, _, weights = self.ray_marcher(colors_coarse, densities_coarse, depths_coarse, rendering_options)\n\n depths_fine = self.sample_importance(depths_coarse, weights, N_importance)\n\n sample_directions = ray_directions.unsqueeze(-2).expand(-1, -1, N_importance, -1).reshape(batch_size, -1, 3)\n sample_coordinates = (ray_origins.unsqueeze(-2) + depths_fine * ray_directions.unsqueeze(-2)).reshape(batch_size, -1, 3)\n\n out = self.run_model(planes, decoder, sample_coordinates, sample_directions, rendering_options)\n colors_fine = out['rgb']\n densities_fine = out['sigma']\n colors_fine = colors_fine.reshape(batch_size, num_rays, N_importance, colors_fine.shape[-1])\n densities_fine = densities_fine.reshape(batch_size, num_rays, N_importance, 1)\n\n all_depths, all_colors, all_densities = self.unify_samples(depths_coarse, colors_coarse, densities_coarse,\n depths_fine, colors_fine, densities_fine)\n\n # Aggregate\n rgb_final, depth_final, weights = self.ray_marcher(all_colors, all_densities, all_depths, rendering_options)\n else:\n rgb_final, depth_final, weights = self.ray_marcher(colors_coarse, densities_coarse, depths_coarse, rendering_options)\n\n\n return rgb_final, depth_final, weights.sum(2)\n\n def run_model(self, planes, decoder, sample_coordinates, sample_directions, options):\n sampled_features = sample_from_planes(self.plane_axes, planes, sample_coordinates, padding_mode='zeros', box_warp=options['box_warp'])\n\n out = decoder(sampled_features, sample_directions)\n if options.get('density_noise', 0) > 0:\n out['sigma'] += torch.randn_like(out['sigma']) * options['density_noise']\n return out\n\n def sort_samples(self, all_depths, all_colors, all_densities):\n _, indices = torch.sort(all_depths, dim=-2)\n all_depths = torch.gather(all_depths, -2, indices)\n all_colors = torch.gather(all_colors, -2, indices.expand(-1, -1, -1, all_colors.shape[-1]))\n all_densities = torch.gather(all_densities, -2, indices.expand(-1, -1, -1, 1))\n return all_depths, all_colors, all_densities\n\n def unify_samples(self, depths1, colors1, densities1, depths2, colors2, densities2):\n all_depths = torch.cat([depths1, depths2], dim = -2)\n all_colors = torch.cat([colors1, colors2], dim = -2)\n all_densities = torch.cat([densities1, densities2], dim = -2)\n\n _, indices = torch.sort(all_depths, dim=-2)\n all_depths = torch.gather(all_depths, -2, indices)\n all_colors = torch.gather(all_colors, -2, indices.expand(-1, -1, -1, all_colors.shape[-1]))\n all_densities = torch.gather(all_densities, -2, indices.expand(-1, -1, -1, 1))\n\n return all_depths, all_colors, all_densities\n\n def sample_stratified(self, ray_origins, ray_start, ray_end, depth_resolution, disparity_space_sampling=False):\n \"\"\"\n Return depths of approximately uniformly spaced samples along rays.\n \"\"\"\n N, M, _ = ray_origins.shape\n if disparity_space_sampling:\n depths_coarse = torch.linspace(0,\n 1,\n depth_resolution,\n device=ray_origins.device).reshape(1, 1, depth_resolution, 1).repeat(N, M, 1, 1)\n depth_delta = 1/(depth_resolution - 1)\n depths_coarse += torch.rand_like(depths_coarse) * depth_delta\n depths_coarse = 1./(1./ray_start * (1. 
- depths_coarse) + 1./ray_end * depths_coarse)\n else:\n if type(ray_start) == torch.Tensor:\n depths_coarse = math_utils.linspace(ray_start, ray_end, depth_resolution).permute(1,2,0,3)\n depth_delta = (ray_end - ray_start) / (depth_resolution - 1)\n depths_coarse += torch.rand_like(depths_coarse) * depth_delta[..., None]\n else:\n depths_coarse = torch.linspace(ray_start, ray_end, depth_resolution, device=ray_origins.device).reshape(1, 1, depth_resolution, 1).repeat(N, M, 1, 1)\n depth_delta = (ray_end - ray_start)/(depth_resolution - 1)\n depths_coarse += torch.rand_like(depths_coarse) * depth_delta\n\n return depths_coarse\n\n def sample_importance(self, z_vals, weights, N_importance):\n \"\"\"\n Return depths of importance sampled points along rays. See NeRF importance sampling for more.\n \"\"\"\n with torch.no_grad():\n batch_size, num_rays, samples_per_ray, _ = z_vals.shape\n\n z_vals = z_vals.reshape(batch_size * num_rays, samples_per_ray)\n weights = weights.reshape(batch_size * num_rays, -1) # -1 to account for loss of 1 sample in MipRayMarcher\n\n # smooth weights\n weights = torch.nn.functional.max_pool1d(weights.unsqueeze(1).float(), 2, 1, padding=1)\n weights = torch.nn.functional.avg_pool1d(weights, 2, 1).squeeze()\n weights = weights + 0.01\n\n z_vals_mid = 0.5 * (z_vals[: ,:-1] + z_vals[: ,1:])\n importance_z_vals = self.sample_pdf(z_vals_mid, weights[:, 1:-1],\n N_importance).detach().reshape(batch_size, num_rays, N_importance, 1)\n return importance_z_vals\n\n def sample_pdf(self, bins, weights, N_importance, det=False, eps=1e-5):\n \"\"\"\n Sample @N_importance samples from @bins with distribution defined by @weights.\n Inputs:\n bins: (N_rays, N_samples_+1) where N_samples_ is \"the number of coarse samples per ray - 2\"\n weights: (N_rays, N_samples_)\n N_importance: the number of samples to draw from the distribution\n det: deterministic or not\n eps: a small number to prevent division by zero\n Outputs:\n samples: the sampled samples\n \"\"\"\n N_rays, N_samples_ = weights.shape\n weights = weights + eps # prevent division by zero (don't do inplace op!)\n pdf = weights / torch.sum(weights, -1, keepdim=True) # (N_rays, N_samples_)\n cdf = torch.cumsum(pdf, -1) # (N_rays, N_samples), cumulative distribution function\n cdf = torch.cat([torch.zeros_like(cdf[: ,:1]), cdf], -1) # (N_rays, N_samples_+1)\n # padded to 0~1 inclusive\n\n if det:\n u = torch.linspace(0, 1, N_importance, device=bins.device)\n u = u.expand(N_rays, N_importance)\n else:\n u = torch.rand(N_rays, N_importance, device=bins.device)\n u = u.contiguous()\n\n inds = torch.searchsorted(cdf, u, right=True)\n below = torch.clamp_min(inds-1, 0)\n above = torch.clamp_max(inds, N_samples_)\n\n inds_sampled = torch.stack([below, above], -1).view(N_rays, 2*N_importance)\n cdf_g = torch.gather(cdf, 1, inds_sampled).view(N_rays, N_importance, 2)\n bins_g = torch.gather(bins, 1, inds_sampled).view(N_rays, N_importance, 2)\n\n denom = cdf_g[...,1]-cdf_g[...,0]\n denom[denom<eps] = 1 # denom equals 0 means a bin has weight 0, in which case it will not be sampled\n # anyway, therefore any value for it is fine (set to 1 here)\n\n samples = bins_g[...,0] + (u-cdf_g[...,0])/denom * (bins_g[...,1]-bins_g[...,0])\n return samples" }, { "identifier": "RaySampler", "path": "models/eg3d/volumetric_rendering/ray_sampler.py", "snippet": "class RaySampler(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.ray_origins_h, self.ray_directions, self.depths, self.image_coords, self.rendering_options = None, None, 
None, None, None\n\n\n def forward(self, cam2world_matrix, intrinsics, resolution):\n \"\"\"\n Create batches of rays and return origins and directions.\n\n cam2world_matrix: (N, 4, 4)\n intrinsics: (N, 3, 3)\n resolution: int\n\n ray_origins: (N, M, 3)\n ray_dirs: (N, M, 2)\n \"\"\"\n N, M = cam2world_matrix.shape[0], resolution**2\n cam_locs_world = cam2world_matrix[:, :3, 3]\n fx = intrinsics[:, 0, 0]\n fy = intrinsics[:, 1, 1]\n cx = intrinsics[:, 0, 2]\n cy = intrinsics[:, 1, 2]\n sk = intrinsics[:, 0, 1]\n\n uv = torch.stack(torch.meshgrid(torch.arange(resolution, dtype=torch.float32, device=cam2world_matrix.device), torch.arange(resolution, dtype=torch.float32, device=cam2world_matrix.device), indexing='ij')) * (1./resolution) + (0.5/resolution)\n uv = uv.flip(0).reshape(2, -1).transpose(1, 0)\n uv = uv.unsqueeze(0).repeat(cam2world_matrix.shape[0], 1, 1)\n\n x_cam = uv[:, :, 0].view(N, -1)\n y_cam = uv[:, :, 1].view(N, -1)\n z_cam = torch.ones((N, M), device=cam2world_matrix.device)\n\n x_lift = (x_cam - cx.unsqueeze(-1) + cy.unsqueeze(-1)*sk.unsqueeze(-1)/fy.unsqueeze(-1) - sk.unsqueeze(-1)*y_cam/fy.unsqueeze(-1)) / fx.unsqueeze(-1) * z_cam\n y_lift = (y_cam - cy.unsqueeze(-1)) / fy.unsqueeze(-1) * z_cam\n\n cam_rel_points = torch.stack((x_lift, y_lift, z_cam, torch.ones_like(z_cam)), dim=-1)\n\n world_rel_points = torch.bmm(cam2world_matrix, cam_rel_points.permute(0, 2, 1)).permute(0, 2, 1)[:, :, :3]\n\n ray_dirs = world_rel_points - cam_locs_world[:, None, :]\n ray_dirs = torch.nn.functional.normalize(ray_dirs, dim=2)\n\n ray_origins = cam_locs_world.unsqueeze(1).repeat(1, ray_dirs.shape[1], 1)\n\n return ray_origins, ray_dirs" }, { "identifier": "SuperresolutionHybrid8XDC", "path": "models/eg3d/superresolution.py", "snippet": "class SuperresolutionHybrid8XDC(torch.nn.Module):\n def __init__(self, channels, img_resolution, sr_num_fp16_res, sr_antialias,\n num_fp16_res=4, conv_clamp=None, channel_base=None, channel_max=None,# IGNORE\n **block_kwargs):\n super().__init__()\n assert img_resolution == 512\n\n use_fp16 = sr_num_fp16_res > 0\n self.input_resolution = 128\n self.sr_antialias = sr_antialias\n self.block0 = SynthesisBlock(channels, 256, w_dim=512, resolution=256,\n img_channels=3, is_last=False, use_fp16=use_fp16, conv_clamp=(256 if use_fp16 else None), **block_kwargs)\n self.block1 = SynthesisBlock(256, 128, w_dim=512, resolution=512,\n img_channels=3, is_last=True, use_fp16=use_fp16, conv_clamp=(256 if use_fp16 else None), **block_kwargs)\n\n def forward(self, rgb, x, ws, **block_kwargs):\n ws = ws[:, -1:, :].repeat(1, 3, 1) # 提取最后一层的w [B, 1, 512]\n\n if x.shape[-1] != self.input_resolution:\n x = torch.nn.functional.interpolate(x, size=(self.input_resolution, self.input_resolution),\n mode='bilinear', align_corners=False, antialias=self.sr_antialias)\n rgb = torch.nn.functional.interpolate(rgb, size=(self.input_resolution, self.input_resolution),\n mode='bilinear', align_corners=False, antialias=self.sr_antialias)\n\n x, rgb = self.block0(x, rgb, ws, **block_kwargs)\n x, rgb = self.block1(x, rgb, ws, **block_kwargs)\n return rgb" }, { "identifier": "FullyConnectedLayer", "path": "models/eg3d/networks_stylegan2.py", "snippet": "class FullyConnectedLayer(torch.nn.Module):\n def __init__(self,\n in_features, # Number of input features.\n out_features, # Number of output features.\n bias = True, # Apply additive bias before the activation function?\n activation = 'linear', # Activation function: 'relu', 'lrelu', etc.\n lr_multiplier = 1, # Learning rate multiplier.\n 
bias_init = 0, # Initial value for the additive bias.\n ):\n super().__init__()\n self.in_features = in_features\n self.out_features = out_features\n self.activation = activation\n self.weight = torch.nn.Parameter(torch.randn([out_features, in_features]) / lr_multiplier)\n self.bias = torch.nn.Parameter(torch.full([out_features], np.float32(bias_init))) if bias else None\n self.weight_gain = lr_multiplier / np.sqrt(in_features)\n self.bias_gain = lr_multiplier\n\n def forward(self, x):\n w = self.weight.to(x.dtype) * self.weight_gain\n b = self.bias\n if b is not None:\n b = b.to(x.dtype)\n if self.bias_gain != 1:\n b = b * self.bias_gain\n\n if self.activation == 'linear' and b is not None:\n x = torch.addmm(b.unsqueeze(0), x, w.t())\n else:\n x = x.matmul(w.t())\n x = bias_act.bias_act(x, b, act=self.activation)\n return x\n\n def extra_repr(self):\n return f'in_features={self.in_features:d}, out_features={self.out_features:d}, activation={self.activation:s}'" } ]
import torch import dnnlib from torch_utils import persistence from models.eg3d.networks_stylegan2 import Generator as StyleGAN2Backbone from models.eg3d.volumetric_rendering.renderer import ImportanceRenderer from models.eg3d.volumetric_rendering.ray_sampler import RaySampler from models.eg3d.superresolution import SuperresolutionHybrid8XDC from models.eg3d.networks_stylegan2 import FullyConnectedLayer
4,998
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: LicenseRef-NvidiaProprietary # # NVIDIA CORPORATION, its affiliates and licensors retain all intellectual # property and proprietary rights in and to this material, related # documentation and any modifications thereto. Any use, reproduction, # disclosure or distribution of this material and related documentation # without an express license agreement from NVIDIA CORPORATION or # its affiliates is strictly prohibited. @persistence.persistent_class class TriPlaneGenerator(torch.nn.Module): def __init__(self, z_dim, # Input latent (Z) dimensionality. c_dim, # Conditioning label (C) dimensionality. w_dim, # Intermediate latent (W) dimensionality. img_resolution, # Output resolution. img_channels, # Number of output color channels. sr_num_fp16_res = 0, mapping_kwargs = {}, # Arguments for MappingNetwork. rendering_kwargs = {}, sr_kwargs = {}, **synthesis_kwargs, # Arguments for SynthesisNetwork. ): super().__init__() self.z_dim=z_dim self.c_dim=c_dim self.w_dim=w_dim self.img_resolution=img_resolution self.img_channels=img_channels self.renderer = ImportanceRenderer() self.ray_sampler = RaySampler()
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: LicenseRef-NvidiaProprietary # # NVIDIA CORPORATION, its affiliates and licensors retain all intellectual # property and proprietary rights in and to this material, related # documentation and any modifications thereto. Any use, reproduction, # disclosure or distribution of this material and related documentation # without an express license agreement from NVIDIA CORPORATION or # its affiliates is strictly prohibited. @persistence.persistent_class class TriPlaneGenerator(torch.nn.Module): def __init__(self, z_dim, # Input latent (Z) dimensionality. c_dim, # Conditioning label (C) dimensionality. w_dim, # Intermediate latent (W) dimensionality. img_resolution, # Output resolution. img_channels, # Number of output color channels. sr_num_fp16_res = 0, mapping_kwargs = {}, # Arguments for MappingNetwork. rendering_kwargs = {}, sr_kwargs = {}, **synthesis_kwargs, # Arguments for SynthesisNetwork. ): super().__init__() self.z_dim=z_dim self.c_dim=c_dim self.w_dim=w_dim self.img_resolution=img_resolution self.img_channels=img_channels self.renderer = ImportanceRenderer() self.ray_sampler = RaySampler()
self.backbone = StyleGAN2Backbone(z_dim, c_dim, w_dim, img_resolution=256, img_channels=32*3, mapping_kwargs=mapping_kwargs, **synthesis_kwargs)
0
2023-12-09 15:18:53+00:00
8k
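The context for this sample centres on EG3D's RaySampler, which lifts normalized pixel centres through the pinhole intrinsics and a cam2world matrix into per-pixel ray origins and directions. Below is a minimal, self-contained sketch of that lifting step, assuming square images and normalized intrinsics and dropping the skew term; the function name and variable names are illustrative and are not taken from the row above.

import torch

def sample_rays(cam2world: torch.Tensor, intrinsics: torch.Tensor, resolution: int):
    """cam2world: (N, 4, 4); intrinsics: (N, 3, 3), normalized; returns (N, R*R, 3) origins and directions."""
    N, device = cam2world.shape[0], cam2world.device
    fx, fy = intrinsics[:, 0, 0], intrinsics[:, 1, 1]
    cx, cy = intrinsics[:, 0, 2], intrinsics[:, 1, 2]
    # Pixel centres expressed in [0, 1], matching the half-pixel offset used in the original snippet.
    coords = (torch.arange(resolution, device=device, dtype=torch.float32) + 0.5) / resolution
    v, u = torch.meshgrid(coords, coords, indexing="ij")
    uv = torch.stack([u.reshape(-1), v.reshape(-1)], dim=-1).unsqueeze(0).expand(N, -1, -1)
    # Lift each pixel to camera space at depth z = 1 (skew term omitted in this sketch).
    x = (uv[..., 0] - cx[:, None]) / fx[:, None]
    y = (uv[..., 1] - cy[:, None]) / fy[:, None]
    z = torch.ones_like(x)
    cam_points = torch.stack([x, y, z, torch.ones_like(z)], dim=-1)            # (N, M, 4) homogeneous points
    world_points = torch.bmm(cam_points, cam2world.transpose(1, 2))[..., :3]   # (N, M, 3) in world space
    origins = cam2world[:, :3, 3].unsqueeze(1).expand_as(world_points)         # camera centre per ray
    directions = torch.nn.functional.normalize(world_points - origins, dim=-1)
    return origins, directions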
blaise-tk/RVC_CLI
main.py
[ { "identifier": "Config", "path": "rvc/configs/config.py", "snippet": "class Config:\n def __init__(self):\n self.device = \"cuda:0\"\n self.is_half = True\n self.use_jit = False\n self.n_cpu = 0\n self.gpu_name = None\n self.json_config = self.load_config_json()\n self.gpu_mem = None\n self.instead = \"\"\n self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config()\n\n @staticmethod\n def load_config_json() -> dict:\n d = {}\n for config_file in version_config_list:\n with open(f\"rvc/configs/{config_file}\", \"r\") as f:\n d[config_file] = json.load(f)\n return d\n\n @staticmethod\n def has_mps() -> bool:\n if not torch.backends.mps.is_available():\n return False\n try:\n torch.zeros(1).to(torch.device(\"mps\"))\n return True\n except Exception:\n return False\n\n @staticmethod\n def has_xpu() -> bool:\n if hasattr(torch, \"xpu\") and torch.xpu.is_available():\n return True\n else:\n return False\n\n def use_fp32_config(self):\n for config_file in version_config_list:\n self.json_config[config_file][\"train\"][\"fp16_run\"] = False\n with open(f\"rvc/configs/{config_file}\", \"r\") as f:\n strr = f.read().replace(\"true\", \"false\")\n with open(f\"rvc/configs/{config_file}\", \"w\") as f:\n f.write(strr)\n with open(\"rvc/train/preprocess/preprocess.py\", \"r\") as f:\n strr = f.read().replace(\"3.7\", \"3.0\")\n with open(\"rvc/train/preprocess/preprocess.py\", \"w\") as f:\n f.write(strr)\n\n def device_config(self) -> tuple:\n if torch.cuda.is_available():\n if self.has_xpu():\n self.device = self.instead = \"xpu:0\"\n self.is_half = True\n i_device = int(self.device.split(\":\")[-1])\n self.gpu_name = torch.cuda.get_device_name(i_device)\n if (\n (\"16\" in self.gpu_name and \"V100\" not in self.gpu_name.upper())\n or \"P40\" in self.gpu_name.upper()\n or \"P10\" in self.gpu_name.upper()\n or \"1060\" in self.gpu_name\n or \"1070\" in self.gpu_name\n or \"1080\" in self.gpu_name\n ):\n self.is_half = False\n self.use_fp32_config()\n self.gpu_mem = int(\n torch.cuda.get_device_properties(i_device).total_memory\n / 1024\n / 1024\n / 1024\n + 0.4\n )\n if self.gpu_mem <= 4:\n with open(\"rvc/train/preprocess/preprocess.py\", \"r\") as f:\n strr = f.read().replace(\"3.7\", \"3.0\")\n with open(\"rvc/train/preprocess/preprocess.py\", \"w\") as f:\n f.write(strr)\n elif self.has_mps():\n print(\"No supported Nvidia GPU found\")\n self.device = self.instead = \"mps\"\n self.is_half = False\n self.use_fp32_config()\n else:\n print(\"No supported Nvidia GPU found\")\n self.device = self.instead = \"cpu\"\n self.is_half = False\n self.use_fp32_config()\n\n if self.n_cpu == 0:\n self.n_cpu = cpu_count()\n\n if self.is_half:\n x_pad = 3\n x_query = 10\n x_center = 60\n x_max = 65\n else:\n x_pad = 1\n x_query = 6\n x_center = 38\n x_max = 41\n\n if self.gpu_mem is not None and self.gpu_mem <= 4:\n x_pad = 1\n x_query = 5\n x_center = 30\n x_max = 32\n\n return x_pad, x_query, x_center, x_max" }, { "identifier": "validate_sampling_rate", "path": "rvc/lib/tools/validators.py", "snippet": "def validate_sampling_rate(value):\n valid_sampling = [\n \"32000\",\n \"40000\",\n \"48000\",\n ]\n if value in valid_sampling:\n return value\n else:\n raise argparse.ArgumentTypeError(\n f\"Invalid sampling_rate. 
Please choose from {valid_sampling} not {value}\"\n )" }, { "identifier": "validate_f0up_key", "path": "rvc/lib/tools/validators.py", "snippet": "def validate_f0up_key(value):\n f0up_key = int(value)\n if -12 <= f0up_key <= 12:\n return f0up_key\n else:\n raise argparse.ArgumentTypeError(f\"f0up_key must be in the range of -12 to +12\")" }, { "identifier": "validate_f0method", "path": "rvc/lib/tools/validators.py", "snippet": "def validate_f0method(value):\n valid_f0methods = [\n \"pm\",\n \"dio\",\n \"crepe\",\n \"crepe-tiny\",\n \"harvest\",\n \"rmvpe\",\n ]\n if value in valid_f0methods:\n return value\n else:\n raise argparse.ArgumentTypeError(\n f\"Invalid f0method. Please choose from {valid_f0methods} not {value}\"\n )" }, { "identifier": "validate_true_false", "path": "rvc/lib/tools/validators.py", "snippet": "def validate_true_false(value):\n valid_tf = [\n \"True\",\n \"False\",\n ]\n if value in valid_tf:\n return value\n else:\n raise argparse.ArgumentTypeError(\n f\"Invalid true_false. Please choose from {valid_tf} not {value}\"\n )" }, { "identifier": "validate_tts_voices", "path": "rvc/lib/tools/validators.py", "snippet": "def validate_tts_voices(value):\n json_path = os.path.join(\"rvc\", \"lib\", \"tools\", \"tts_voices.json\")\n with open(json_path, 'r') as file:\n tts_voices_data = json.load(file)\n\n # Extrae los valores de \"ShortName\" del JSON\n short_names = [voice.get(\"ShortName\", \"\") for voice in tts_voices_data]\n if value in short_names:\n return value\n else:\n raise argparse.ArgumentTypeError(\n f\"Invalid voice. Please choose from {short_names} not {value}\"\n )" }, { "identifier": "generate_config", "path": "rvc/train/extract/preparing_files.py", "snippet": "def generate_config(rvc_version, sampling_rate, model_path):\n if rvc_version == \"v1\" or sampling_rate == \"40000\":\n config_path = f\"v1/{sampling_rate}.json\"\n else:\n config_path = f\"v2/{sampling_rate}.json\"\n config_save_path = os.path.join(model_path, \"config.json\")\n if not pathlib.Path(config_save_path).exists():\n with open(config_save_path, \"w\", encoding=\"utf-8\") as f:\n json.dump(\n config.json_config[config_path],\n f,\n ensure_ascii=False,\n indent=4,\n sort_keys=True,\n )\n f.write(\"\\n\")" }, { "identifier": "generate_filelist", "path": "rvc/train/extract/preparing_files.py", "snippet": "def generate_filelist(f0_method, model_path, rvc_version, sampling_rate):\n gt_wavs_dir = f\"{model_path}/0_gt_wavs\"\n feature_dir = (\n f\"{model_path}/3_feature256\"\n if rvc_version == \"v1\"\n else f\"{model_path}/3_feature768\"\n )\n if f0_method:\n f0_dir = f\"{model_path}/2a_f0\"\n f0nsf_dir = f\"{model_path}/2b-f0nsf\"\n names = (\n set([name.split(\".\")[0] for name in os.listdir(gt_wavs_dir)])\n & set([name.split(\".\")[0] for name in os.listdir(feature_dir)])\n & set([name.split(\".\")[0] for name in os.listdir(f0_dir)])\n & set([name.split(\".\")[0] for name in os.listdir(f0nsf_dir)])\n )\n else:\n names = set([name.split(\".\")[0] for name in os.listdir(gt_wavs_dir)]) & set(\n [name.split(\".\")[0] for name in os.listdir(feature_dir)]\n )\n options = []\n for name in names:\n if f0_method:\n options.append(\n f\"{gt_wavs_dir}/{name}.wav|{feature_dir}/{name}.npy|{f0_dir}/{name}.wav.npy|{f0nsf_dir}/{name}.wav.npy|0\"\n )\n else:\n options.append(f\"{gt_wavs_dir}/{name}.wav|{feature_dir}/{name}.npy|0\")\n fea_dim = 256 if rvc_version == \"v1\" else 768\n if f0_method:\n for _ in range(2):\n options.append(\n 
f\"{current_directory}/logs/mute/0_gt_wavs/mute{sampling_rate}.wav|{current_directory}/logs/mute/3_feature{fea_dim}/mute.npy|{current_directory}/logs/mute/2a_f0/mute.wav.npy|{current_directory}/logs/mute/2b-f0nsf/mute.wav.npy|0\"\n )\n else:\n for _ in range(2):\n options.append(\n f\"{current_directory}/logs/mute/0_gt_wavs/mute{sampling_rate}.wav|{current_directory}/logs/mute/3_feature{fea_dim}/mute.npy|0\"\n )\n shuffle(options)\n with open(f\"{model_path}/filelist.txt\", \"w\") as f:\n f.write(\"\\n\".join(options))" }, { "identifier": "pretrained_selector", "path": "rvc/lib/tools/pretrained_selector.py", "snippet": "def pretrained_selector(pitch_guidance):\n if pitch_guidance:\n return {\n \"v1\": {\n \"32000\": (\n \"rvc/pretraineds/pretrained_v1/f0G32k.pth\",\n \"rvc/pretraineds/pretrained_v1/f0D32k.pth\",\n ),\n \"40000\": (\n \"rvc/pretraineds/pretrained_v1/f0G40k.pth\",\n \"rvc/pretraineds/pretrained_v1/f0D40k.pth\",\n ),\n \"48000\": (\n \"rvc/pretraineds/pretrained_v1/f0G48k.pth\",\n \"rvc/pretraineds/pretrained_v1/f0D48k.pth\",\n ),\n },\n \"v2\": {\n \"32000\": (\n \"rvc/pretraineds/pretrained_v2/f0G32k.pth\",\n \"rvc/pretraineds/pretrained_v2/f0D32k.pth\",\n ),\n \"40000\": (\n \"rvc/pretraineds/pretrained_v2/f0G40k.pth\",\n \"rvc/pretraineds/pretrained_v2/f0D40k.pth\",\n ),\n \"48000\": (\n \"rvc/pretraineds/pretrained_v2/f0G48k.pth\",\n \"rvc/pretraineds/pretrained_v2/f0D48k.pth\",\n ),\n },\n }\n else:\n return {\n \"v1\": {\n \"32000\": (\n \"rvc/pretraineds/pretrained_v1/G32k.pth\",\n \"rvc/pretraineds/pretrained_v1/D32k.pth\",\n ),\n \"40000\": (\n \"rvc/pretraineds/pretrained_v1/G40k.pth\",\n \"rvc/pretraineds/pretrained_v1/D40k.pth\",\n ),\n \"48000\": (\n \"rvc/pretraineds/pretrained_v1/G48k.pth\",\n \"rvc/pretraineds/pretrained_v1/D48k.pth\",\n ),\n },\n \"v2\": {\n \"32000\": (\n \"rvc/pretraineds/pretrained_v2/G32k.pth\",\n \"rvc/pretraineds/pretrained_v2/D32k.pth\",\n ),\n \"40000\": (\n \"rvc/pretraineds/pretrained_v2/G40k.pth\",\n \"rvc/pretraineds/pretrained_v2/D40k.pth\",\n ),\n \"48000\": (\n \"rvc/pretraineds/pretrained_v2/G48k.pth\",\n \"rvc/pretraineds/pretrained_v2/D48k.pth\",\n ),\n },\n }" }, { "identifier": "model_fusion", "path": "rvc/lib/process/model_fusion.py", "snippet": "def model_fusion(model_name, pth_path_1, pth_path_2):\n ckpt1 = torch.load(pth_path_1, map_location=\"cpu\")\n ckpt2 = torch.load(pth_path_2, map_location=\"cpu\")\n if \"model\" in ckpt1:\n ckpt1 = extract(ckpt1)\n else:\n ckpt1 = ckpt1[\"weight\"]\n if \"model\" in ckpt2:\n ckpt2 = extract(ckpt2)\n else:\n ckpt2 = ckpt2[\"weight\"]\n if sorted(ckpt1.keys()) != sorted(ckpt2.keys()):\n return \"Fail to merge the models. The model architectures are not the same.\"\n opt = OrderedDict(\n weight={\n key: 1 * value.float() + (1 - 1) * ckpt2[key].float()\n for key, value in ckpt1.items()\n }\n )\n opt[\"info\"] = f\"Model fusion of {pth_path_1} and {pth_path_2}\"\n torch.save(opt, f\"logs/{model_name}.pth\")\n print(f\"Model fusion of {pth_path_1} and {pth_path_2} is done.\")" }, { "identifier": "model_information", "path": "rvc/lib/process/model_information.py", "snippet": "def model_information(path):\n model_data = torch.load(path, map_location=\"cpu\")\n\n print(f\"Loaded model from {path}\")\n\n data = model_data\n\n epochs = data.get(\"info\", \"None\")\n sr = data.get(\"sr\", \"None\")\n f0 = data.get(\"f0\", \"None\")\n version = data.get(\"version\", \"None\")\n\n return(f\"Epochs: {epochs}\\nSampling rate: {sr}\\nPitch guidance: {f0}\\nVersion: {version}\")" } ]
import os import sys import argparse import subprocess from rvc.configs.config import Config from rvc.lib.tools.validators import ( validate_sampling_rate, validate_f0up_key, validate_f0method, validate_true_false, validate_tts_voices, ) from rvc.train.extract.preparing_files import generate_config, generate_filelist from rvc.lib.tools.pretrained_selector import pretrained_selector from rvc.lib.process.model_fusion import model_fusion from rvc.lib.process.model_information import model_information
5,141
model_path = os.path.join(logs_path, str(model_name)) extract_f0_script_path = os.path.join( "rvc", "train", "extract", "extract_f0_print.py" ) extract_feature_script_path = os.path.join( "rvc", "train", "extract", "extract_feature_print.py" ) command_1 = [ "python", extract_f0_script_path, model_path, f0method, str(hop_length), ] command_2 = [ "python", extract_feature_script_path, config.device, "1", "0", "0", model_path, rvc_version, "True", ] subprocess.run(command_1) subprocess.run(command_2) generate_config(rvc_version, sampling_rate, model_path) generate_filelist(f0method, model_path, rvc_version, sampling_rate) return f"Model {model_name} extracted successfully." # Train def run_train_script( model_name, rvc_version, save_every_epoch, save_only_latest, save_every_weights, total_epoch, sampling_rate, batch_size, gpu, pitch_guidance, pretrained, custom_pretrained, g_pretrained_path=None, d_pretrained_path=None, ): f0 = 1 if pitch_guidance == "True" else 0 latest = 1 if save_only_latest == "True" else 0 save_every = 1 if save_every_weights == "True" else 0 if pretrained == "True": if custom_pretrained == "False": pg, pd = pretrained_selector(f0)[rvc_version][sampling_rate] else: if g_pretrained_path is None or d_pretrained_path is None: raise ValueError( "Please provide the path to the pretrained G and D models." ) pg, pd = g_pretrained_path, d_pretrained_path else: pg, pd = "", "" train_script_path = os.path.join("rvc", "train", "train.py") command = [ "python", train_script_path, "-se", str(save_every_epoch), "-te", str(total_epoch), "-pg", pg, "-pd", pd, "-sr", str(sampling_rate), "-bs", str(batch_size), "-g", gpu, "-e", os.path.join(logs_path, str(model_name)), "-v", rvc_version, "-l", str(latest), "-c", "0", "-sw", str(save_every), "-f0", str(f0), ] subprocess.run(command) run_index_script(model_name, rvc_version) return f"Model {model_name} trained successfully." # Index def run_index_script(model_name, rvc_version): index_script_path = os.path.join("rvc", "train", "index_generator.py") command = [ "python", index_script_path, os.path.join(logs_path, str(model_name)), rvc_version, ] subprocess.run(command) return f"Index file for {model_name} generated successfully." # Model information def run_model_information_script(pth_path):
config = Config() current_script_directory = os.path.dirname(os.path.realpath(__file__)) logs_path = os.path.join(current_script_directory, "logs") subprocess.run( ["python", os.path.join("rvc", "lib", "tools", "prerequisites_download.py")] ) # Infer def run_infer_script( f0up_key, filter_radius, index_rate, hop_length, f0method, input_path, output_path, pth_file, index_path, ): infer_script_path = os.path.join("rvc", "infer", "infer.py") command = [ "python", infer_script_path, str(f0up_key), str(filter_radius), str(index_rate), str(hop_length), f0method, input_path, output_path, pth_file, index_path, ] subprocess.run(command) return f"File {input_path} inferred successfully." # Batch infer def run_batch_infer_script( f0up_key, filter_radius, index_rate, hop_length, f0method, input_folder, output_folder, pth_file, index_path, ): infer_script_path = os.path.join("rvc", "infer", "infer.py") audio_files = [ f for f in os.listdir(input_folder) if f.endswith((".mp3", ".wav", ".flac")) ] print(f"Detected {len(audio_files)} audio files for inference.") for audio_file in audio_files: if "_output" in audio_file: pass else: input_path = os.path.join(input_folder, audio_file) output_file_name = os.path.splitext(os.path.basename(audio_file))[0] output_path = os.path.join( output_folder, f"{output_file_name}_output{os.path.splitext(audio_file)[1]}", ) print(f"Inferring {input_path}...") command = [ "python", infer_script_path, str(f0up_key), str(filter_radius), str(index_rate), str(hop_length), f0method, input_path, output_path, pth_file, index_path, ] subprocess.run(command) return f"Files from {input_folder} inferred successfully." # TTS def run_tts_script( tts_text, tts_voice, f0up_key, filter_radius, index_rate, hop_length, f0method, output_tts_path, output_rvc_path, pth_file, index_path, ): tts_script_path = os.path.join("rvc", "lib", "tools", "tts.py") infer_script_path = os.path.join("rvc", "infer", "infer.py") if os.path.exists(output_tts_path): os.remove(output_tts_path) command_tts = [ "python", tts_script_path, tts_text, tts_voice, output_tts_path, ] command_infer = [ "python", infer_script_path, str(f0up_key), str(filter_radius), str(index_rate), str(hop_length), f0method, output_tts_path, output_rvc_path, pth_file, index_path, ] subprocess.run(command_tts) subprocess.run(command_infer) return f"Text {tts_text} synthesized successfully.", output_rvc_path # Preprocess def run_preprocess_script(model_name, dataset_path, sampling_rate): per = 3.0 if config.is_half else 3.7 preprocess_script_path = os.path.join("rvc", "train", "preprocess", "preprocess.py") command = [ "python", preprocess_script_path, os.path.join(logs_path, str(model_name)), dataset_path, str(sampling_rate), str(per), ] os.mkdir(os.path.join(logs_path, str(model_name))) subprocess.run(command) return f"Model {model_name} preprocessed successfully." 
# Extract def run_extract_script(model_name, rvc_version, f0method, hop_length, sampling_rate): model_path = os.path.join(logs_path, str(model_name)) extract_f0_script_path = os.path.join( "rvc", "train", "extract", "extract_f0_print.py" ) extract_feature_script_path = os.path.join( "rvc", "train", "extract", "extract_feature_print.py" ) command_1 = [ "python", extract_f0_script_path, model_path, f0method, str(hop_length), ] command_2 = [ "python", extract_feature_script_path, config.device, "1", "0", "0", model_path, rvc_version, "True", ] subprocess.run(command_1) subprocess.run(command_2) generate_config(rvc_version, sampling_rate, model_path) generate_filelist(f0method, model_path, rvc_version, sampling_rate) return f"Model {model_name} extracted successfully." # Train def run_train_script( model_name, rvc_version, save_every_epoch, save_only_latest, save_every_weights, total_epoch, sampling_rate, batch_size, gpu, pitch_guidance, pretrained, custom_pretrained, g_pretrained_path=None, d_pretrained_path=None, ): f0 = 1 if pitch_guidance == "True" else 0 latest = 1 if save_only_latest == "True" else 0 save_every = 1 if save_every_weights == "True" else 0 if pretrained == "True": if custom_pretrained == "False": pg, pd = pretrained_selector(f0)[rvc_version][sampling_rate] else: if g_pretrained_path is None or d_pretrained_path is None: raise ValueError( "Please provide the path to the pretrained G and D models." ) pg, pd = g_pretrained_path, d_pretrained_path else: pg, pd = "", "" train_script_path = os.path.join("rvc", "train", "train.py") command = [ "python", train_script_path, "-se", str(save_every_epoch), "-te", str(total_epoch), "-pg", pg, "-pd", pd, "-sr", str(sampling_rate), "-bs", str(batch_size), "-g", gpu, "-e", os.path.join(logs_path, str(model_name)), "-v", rvc_version, "-l", str(latest), "-c", "0", "-sw", str(save_every), "-f0", str(f0), ] subprocess.run(command) run_index_script(model_name, rvc_version) return f"Model {model_name} trained successfully." # Index def run_index_script(model_name, rvc_version): index_script_path = os.path.join("rvc", "train", "index_generator.py") command = [ "python", index_script_path, os.path.join(logs_path, str(model_name)), rvc_version, ] subprocess.run(command) return f"Index file for {model_name} generated successfully." # Model information def run_model_information_script(pth_path):
print(model_information(pth_path))
10
2023-12-10 21:09:41+00:00
8k
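main.py in this sample pairs argparse type-validators (validate_f0up_key and friends) with subprocess calls that shell out to worker scripts. Below is a minimal sketch of that validator-plus-subprocess pattern; validate_pitch_shift, demo_worker.py, and the flag names are invented placeholders, not part of RVC_CLI.

import argparse
import subprocess

def validate_pitch_shift(value: str) -> int:
    """argparse 'type' callable: accept only integer semitone shifts in [-12, 12]."""
    shift = int(value)
    if -12 <= shift <= 12:
        return shift
    raise argparse.ArgumentTypeError("pitch shift must be between -12 and +12")

def main() -> None:
    parser = argparse.ArgumentParser(description="toy wrapper around a worker script")
    parser.add_argument("--pitch", type=validate_pitch_shift, default=0)
    parser.add_argument("--input", required=True)
    args = parser.parse_args()
    # Hand the validated arguments to a child process as plain strings.
    subprocess.run(["python", "demo_worker.py", str(args.pitch), args.input], check=True)

if __name__ == "__main__":
    main()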
SubConv/SubConv
api.py
[ { "identifier": "pack", "path": "modules/pack.py", "snippet": "async def pack(url: list, urlstandalone: list, urlstandby:list, urlstandbystandalone: list, content: str, interval, domain, short):\n regionDict, total, providerProxyNames = await parse.mkList(content, urlstandalone) # regions available and corresponding group name\n result = {}\n\n # create a snippet containing region groups\n regionGroups = []\n for i in total.values():\n regionGroups.append(i[1])\n \n\n if short is None:\n # head of config\n result.update(head.HEAD)\n\n # dns\n result.update(head.DNS)\n\n # proxies\n proxies = {\n \"proxies\": []\n }\n proxiesName = []\n proxiesStandbyName = []\n\n if urlstandalone or urlstandbystandalone:\n if urlstandalone:\n for i in urlstandalone:\n proxies[\"proxies\"].append(\n i\n )\n proxiesName.append(i[\"name\"])\n proxiesStandbyName.append(i[\"name\"])\n if urlstandbystandalone:\n for i in urlstandbystandalone:\n proxies[\"proxies\"].append(\n i\n )\n proxiesStandbyName.append(i[\"name\"])\n if len(proxies[\"proxies\"]) == 0:\n proxies = None\n if len(proxiesName) == 0:\n proxiesName = None\n if len(proxiesStandbyName) == 0:\n proxiesStandbyName = None\n if proxies:\n result.update(proxies)\n\n\n # proxy providers\n providers = {\n \"proxy-providers\": {}\n }\n if url or urlstandby:\n if url:\n for u in range(len(url)):\n providers[\"proxy-providers\"].update({\n \"subscription{}\".format(u): {\n \"type\": \"http\",\n \"url\": url[u],\n \"interval\": int(interval),\n \"path\": \"./sub/subscription{}.yaml\".format(u),\n \"health-check\": {\n \"enable\": True,\n \"interval\": 60,\n # \"lazy\": True,\n \"url\": config.test_url\n }\n }\n })\n if urlstandby:\n for u in range(len(urlstandby)):\n providers[\"proxy-providers\"].update({\n \"subscription{}\".format(\"sub\"+str(u)): {\n \"type\": \"http\",\n \"url\": urlstandby[u],\n \"interval\": int(interval),\n \"path\": \"./sub/subscription{}.yaml\".format(\"sub\"+str(u)),\n \"health-check\": {\n \"enable\": True,\n \"interval\": 60,\n # \"lazy\": True,\n \"url\": config.test_url\n }\n }\n })\n if len(providers[\"proxy-providers\"]) == 0:\n providers = None\n if providers:\n result.update(providers)\n\n # result += head.PROXY_GROUP_HEAD\n proxyGroups = {\n \"proxy-groups\": []\n }\n \n # add proxy select\n proxySelect = {\n \"name\": \"🚀 节点选择\",\n \"type\": \"select\",\n \"proxies\": []\n }\n for group in config.custom_proxy_group:\n if group.get(\"rule\") == False:\n proxySelect[\"proxies\"].append(group[\"name\"])\n proxySelect[\"proxies\"].extend(regionGroups)\n proxySelect[\"proxies\"].append(\"DIRECT\")\n proxyGroups[\"proxy-groups\"].append(proxySelect)\n\n \n\n # generate subscriptions and standby subscriptions list\n subscriptions = []\n if url:\n for u in range(len(url)):\n subscriptions.append(\"subscription{}\".format(u))\n standby = subscriptions.copy()\n if urlstandby:\n for u in range(len(urlstandby)):\n standby.append(\"subscriptionsub{}\".format(u))\n if len(subscriptions) == 0:\n subscriptions = None\n if len(standby) == 0:\n standby = None\n\n\n # add proxy groups\n for group in config.custom_proxy_group:\n type = group[\"type\"]\n region = group.get(\"region\")\n regex = group.get(\"regex\")\n\n rule = group.get(\"rule\")\n if rule is None:\n rule = True\n\n if type == \"select\" and rule:\n prior = group[\"prior\"]\n if prior == \"DIRECT\":\n proxyGroups[\"proxy-groups\"].append({\n \"name\": group[\"name\"],\n \"type\": \"select\",\n \"proxies\": [\n \"DIRECT\",\n \"REJECT\",\n \"🚀 节点选择\",\n *regionGroups,\n 
*[_group[\"name\"] for _group in config.custom_proxy_group if _group.get(\"rule\") == False]\n ]\n })\n elif prior == \"REJECT\":\n proxyGroups[\"proxy-groups\"].append({\n \"name\": group[\"name\"],\n \"type\": \"select\",\n \"proxies\": [\n \"REJECT\",\n \"DIRECT\",\n \"🚀 节点选择\",\n *regionGroups,\n *[_group[\"name\"] for _group in config.custom_proxy_group if _group.get(\"rule\") == False]\n ]\n })\n else:\n proxyGroups[\"proxy-groups\"].append({\n \"name\": group[\"name\"],\n \"type\": \"select\",\n \"proxies\": [\n \"🚀 节点选择\",\n *regionGroups,\n *[_group[\"name\"] for _group in config.custom_proxy_group if _group.get(\"rule\") == False],\n \"DIRECT\",\n \"REJECT\"\n ]\n })\n\n elif type == \"load-balance\" or type == \"select\" or type == \"fallback\" or type == \"url-test\":\n # init\n proxyGroup = {\n \"name\": group[\"name\"],\n \"type\": type\n }\n # add proxies\n if regex is not None or region is not None:\n if regex is not None:\n tmp = [regex]\n else:\n tmp = []\n for i in region:\n if i in total:\n tmp.append(total[i][0])\n if len(tmp) > 0:\n providerProxies = []\n proxyGroupProxies = []\n proxyGroup[\"filter\"] = \"|\".join(tmp)\n # check if the proxy is in the subscription match the regex\n # check if the standalone proxy match the regex\n if group.get(\"manual\"):\n if standby:\n for p in standby:\n if re.search(\n proxyGroup[\"filter\"],\n p,\n re.I\n ) is not None:\n providerProxies.append(p)\n break\n if len(providerProxies) > 0:\n proxyGroup[\"use\"] = standby\n if proxiesStandbyName:\n for p in proxiesStandbyName:\n if re.search(\n proxyGroup[\"filter\"],\n p,\n re.I\n ) is not None:\n proxyGroupProxies.append(p)\n if len(proxyGroupProxies) > 0:\n proxyGroup[\"proxies\"] = proxyGroupProxies\n else:\n if subscriptions:\n for p in providerProxyNames:\n if re.search(\n proxyGroup[\"filter\"],\n p,\n re.I\n ) is not None:\n providerProxies.append(p)\n break\n if len(providerProxies) > 0:\n proxyGroup[\"use\"] = subscriptions\n if proxiesName:\n for p in proxiesName:\n if re.search(\n proxyGroup[\"filter\"],\n p,\n re.I\n ) is not None:\n proxyGroupProxies.append(p)\n if len(proxyGroupProxies) > 0:\n proxyGroup[\"proxies\"] = proxyGroupProxies\n # if no proxy match the regex, remove the name in the first group\n if len(providerProxies) + len(proxyGroupProxies) == 0:\n proxyGroups[\"proxy-groups\"][0][\"proxies\"].remove(group[\"name\"])\n proxyGroup = None\n else:\n proxyGroups[\"proxy-groups\"][0][\"proxies\"].remove(group[\"name\"])\n proxyGroup = None\n if proxyGroup is not None:\n if type == \"load-balance\":\n proxyGroup[\"strategy\"] = \"consistent-hashing\"\n proxyGroup[\"url\"] = config.test_url\n proxyGroup[\"interval\"] = 60\n proxyGroup[\"tolerance\"] = 50\n elif type == \"fallback\":\n proxyGroup[\"url\"] = config.test_url\n proxyGroup[\"interval\"] = 60\n proxyGroup[\"tolerance\"] = 50\n elif type == \"url-test\":\n proxyGroup[\"url\"] = config.test_url\n proxyGroup[\"interval\"] = 60\n proxyGroup[\"tolerance\"] = 50\n else:\n if group.get(\"manual\"):\n if standby:\n proxyGroup[\"use\"] = standby\n if proxiesStandbyName:\n proxyGroup[\"proxies\"] = proxiesStandbyName\n else:\n if subscriptions:\n proxyGroup[\"use\"] = subscriptions\n if proxiesName:\n proxyGroup[\"proxies\"] = proxiesName\n if proxyGroup is not None:\n proxyGroups[\"proxy-groups\"].append(proxyGroup)\n\n # add region groups\n for i in total:\n urlTest = {\n \"name\": total[i][1],\n \"type\": \"url-test\",\n \"url\": config.test_url,\n \"interval\": 60,\n \"tolerance\": 50,\n \"filter\": 
total[i][0]\n }\n if subscriptions:\n urlTest[\"use\"] = subscriptions\n if proxiesName:\n urlTestProxies = []\n for p in proxiesName:\n if re.search(\n total[i][0],\n p,\n re.I\n ) is not None:\n urlTestProxies.append(p)\n if len(urlTestProxies) > 0:\n urlTest[\"proxies\"] = urlTestProxies\n else:\n urlTestProxies = None\n proxyGroups[\"proxy-groups\"].append(urlTest)\n\n # remove proxies that do not exist in any proxy group\n proxyGroupAndProxyList = ([\"DIRECT\", \"REJECT\"])\n proxyGroupAndProxyList.extend([i[\"name\"] for i in proxyGroups[\"proxy-groups\"]])\n if proxiesStandbyName is not None:\n proxyGroupAndProxyList.extend(proxiesStandbyName)\n for proxygroup in proxyGroups[\"proxy-groups\"]:\n if \"proxies\" not in proxygroup:\n continue\n for proxy in proxygroup[\"proxies\"]:\n if proxy not in proxyGroupAndProxyList:\n proxygroup[\"proxies\"].remove(proxy)\n\n result.update(proxyGroups)\n\n # rules\n yaml.SafeDumper.ignore_aliases = lambda *args : True\n result = yaml.safe_dump(result, allow_unicode=True, sort_keys=False)\n result += (\"rules:\\n - DOMAIN,{},DIRECT\\n\".format(domain) + cache.cache)\n return result" }, { "identifier": "parse", "path": "modules/parse.py", "snippet": "async def parseSubs(content):\nasync def mkList(content: list, urlstandalone: list):" }, { "identifier": "converter", "path": "modules/convert/converter.py", "snippet": "async def ConvertsV2Ray(buf):" } ]
from modules import pack from modules import parse from modules.convert import converter from fastapi import FastAPI, HTTPException from fastapi.requests import Request from fastapi.responses import FileResponse, Response from fastapi.staticfiles import StaticFiles from urllib.parse import urlencode, unquote from pathlib import Path import uvicorn import httpx import argparse import re
3,821
# coding=utf-8 def length(sth): if sth is None: return 0 else: return len(sth) app = FastAPI() # mainpage app.mount("/static", StaticFiles(directory="static"), name="static") @app.get("/") async def mainpage(): return FileResponse("static/index.html") # subscription to proxy-provider @app.get("/provider") async def provider(request: Request): headers = {'Content-Type': 'text/yaml;charset=utf-8'} url = request.query_params.get("url") async with httpx.AsyncClient() as client: resp = await client.get(url, headers={'User-Agent':'clash'}) if resp.status_code < 200 or resp.status_code >= 300: raise HTTPException(status_code=resp.status_code, detail=resp.text) result = await parse.parseSubs(resp.text) return Response(content=result, headers=headers) # subscription converter api @app.get("/sub") async def sub(request: Request): args = request.query_params # get interval if "interval" in args: interval = args["interval"] else: interval = "1800" short = args.get("short") # get the url of original subscription url = args.get("url") url = re.split(r"[|\n]", url) # remove empty lines tmp = list(filter(lambda x: x!="", url)) url = [] urlstandalone = [] for i in tmp: if (i.startswith("http://") or i.startswith("https://")) and not i.startswith("https://t.me/"): url.append(i) else: urlstandalone.append(i) urlstandalone = "\n".join(urlstandalone) if len(url) == 0: url = None if len(urlstandalone) == 0: urlstandalone = None urlstandby = args.get("urlstandby") urlstandbystandalone = None if urlstandby: urlstandby = re.split(r"[|\n]", urlstandby) tmp = list(filter(lambda x: x!="", urlstandby)) urlstandby = [] urlstandbystandalone = [] for i in tmp: if (i.startswith("http://") or i.startswith("https://")) and not i.startswith("https://t.me/"): urlstandby.append(i) else: urlstandbystandalone.append(i) urlstandbystandalone = "\n".join(urlstandbystandalone) if len(urlstandby) == 0: urlstandby = None if len(urlstandbystandalone) == 0: urlstandbystandalone = None if urlstandalone: urlstandalone = await converter.ConvertsV2Ray(urlstandalone) if urlstandbystandalone: urlstandbystandalone = await converter.ConvertsV2Ray(urlstandbystandalone) async with httpx.AsyncClient() as client: # get original headers headers = {'Content-Type': 'text/yaml;charset=utf-8'} # if there's only one subscription, return userinfo if length(url) == 1: resp = await client.head(url[0], headers={'User-Agent':'clash'}) if resp.status_code < 200 or resp.status_code >= 300: raise HTTPException(status_code=resp.status_code, detail=resp.text) originalHeaders = resp.headers if 'subscription-userinfo' in originalHeaders: # containing info about ramaining flow headers['subscription-userinfo'] = originalHeaders['subscription-userinfo'] if 'Content-Disposition' in originalHeaders: # containing filename headers['Content-Disposition'] = originalHeaders['Content-Disposition'].replace("attachment", "inline") content = [] # the proxies of original subscriptions if url is not None: for i in range(len(url)): # the test of response respText = (await client.get(url[i], headers={'User-Agent':'clash'})).text content.append(await parse.parseSubs(respText)) url[i] = "{}provider?{}".format(request.base_url, urlencode({"url": url[i]})) if len(content) == 0: content = None if urlstandby: for i in range(len(urlstandby)): urlstandby[i] = "{}provider?{}".format(request.base_url, urlencode({"url": urlstandby[i]})) # get the domain or ip of this api to add rule for this domain = re.search(r"([^:]+)(:\d{1,5})?", request.url.hostname).group(1) # generate the subscription
#!/usr/bin/env python3 # coding=utf-8 def length(sth): if sth is None: return 0 else: return len(sth) app = FastAPI() # mainpage app.mount("/static", StaticFiles(directory="static"), name="static") @app.get("/") async def mainpage(): return FileResponse("static/index.html") # subscription to proxy-provider @app.get("/provider") async def provider(request: Request): headers = {'Content-Type': 'text/yaml;charset=utf-8'} url = request.query_params.get("url") async with httpx.AsyncClient() as client: resp = await client.get(url, headers={'User-Agent':'clash'}) if resp.status_code < 200 or resp.status_code >= 300: raise HTTPException(status_code=resp.status_code, detail=resp.text) result = await parse.parseSubs(resp.text) return Response(content=result, headers=headers) # subscription converter api @app.get("/sub") async def sub(request: Request): args = request.query_params # get interval if "interval" in args: interval = args["interval"] else: interval = "1800" short = args.get("short") # get the url of original subscription url = args.get("url") url = re.split(r"[|\n]", url) # remove empty lines tmp = list(filter(lambda x: x!="", url)) url = [] urlstandalone = [] for i in tmp: if (i.startswith("http://") or i.startswith("https://")) and not i.startswith("https://t.me/"): url.append(i) else: urlstandalone.append(i) urlstandalone = "\n".join(urlstandalone) if len(url) == 0: url = None if len(urlstandalone) == 0: urlstandalone = None urlstandby = args.get("urlstandby") urlstandbystandalone = None if urlstandby: urlstandby = re.split(r"[|\n]", urlstandby) tmp = list(filter(lambda x: x!="", urlstandby)) urlstandby = [] urlstandbystandalone = [] for i in tmp: if (i.startswith("http://") or i.startswith("https://")) and not i.startswith("https://t.me/"): urlstandby.append(i) else: urlstandbystandalone.append(i) urlstandbystandalone = "\n".join(urlstandbystandalone) if len(urlstandby) == 0: urlstandby = None if len(urlstandbystandalone) == 0: urlstandbystandalone = None if urlstandalone: urlstandalone = await converter.ConvertsV2Ray(urlstandalone) if urlstandbystandalone: urlstandbystandalone = await converter.ConvertsV2Ray(urlstandbystandalone) async with httpx.AsyncClient() as client: # get original headers headers = {'Content-Type': 'text/yaml;charset=utf-8'} # if there's only one subscription, return userinfo if length(url) == 1: resp = await client.head(url[0], headers={'User-Agent':'clash'}) if resp.status_code < 200 or resp.status_code >= 300: raise HTTPException(status_code=resp.status_code, detail=resp.text) originalHeaders = resp.headers if 'subscription-userinfo' in originalHeaders: # containing info about ramaining flow headers['subscription-userinfo'] = originalHeaders['subscription-userinfo'] if 'Content-Disposition' in originalHeaders: # containing filename headers['Content-Disposition'] = originalHeaders['Content-Disposition'].replace("attachment", "inline") content = [] # the proxies of original subscriptions if url is not None: for i in range(len(url)): # the test of response respText = (await client.get(url[i], headers={'User-Agent':'clash'})).text content.append(await parse.parseSubs(respText)) url[i] = "{}provider?{}".format(request.base_url, urlencode({"url": url[i]})) if len(content) == 0: content = None if urlstandby: for i in range(len(urlstandby)): urlstandby[i] = "{}provider?{}".format(request.base_url, urlencode({"url": urlstandby[i]})) # get the domain or ip of this api to add rule for this domain = re.search(r"([^:]+)(:\d{1,5})?", request.url.hostname).group(1) # 
generate the subscription
result = await pack.pack(url=url, urlstandalone=urlstandalone, urlstandby=urlstandby,urlstandbystandalone=urlstandbystandalone, content=content, interval=interval, domain=domain, short=short)
0
2023-12-06 12:57:11+00:00
8k
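The /provider endpoint in this sample fetches an upstream subscription with httpx, checks the status code, and relays the body with a YAML content type. Below is a minimal stand-alone sketch of that fetch-and-relay shape, assuming only FastAPI and httpx; the /relay route name is invented and the parse.parseSubs conversion step is intentionally left out.

import httpx
from fastapi import FastAPI, HTTPException, Request
from fastapi.responses import Response

app = FastAPI()

@app.get("/relay")
async def relay(request: Request) -> Response:
    url = request.query_params.get("url")
    if not url:
        raise HTTPException(status_code=400, detail="missing 'url' query parameter")
    async with httpx.AsyncClient() as client:
        resp = await client.get(url, headers={"User-Agent": "clash"})
    if not (200 <= resp.status_code < 300):
        # Propagate upstream failures rather than returning a half-built config.
        raise HTTPException(status_code=resp.status_code, detail=resp.text)
    return Response(content=resp.text, headers={"Content-Type": "text/yaml;charset=utf-8"})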
Opt-Mucca/PySCIPOpt-ML
src/pyscipopt_ml/modelling/neuralnet/layers.py
[ { "identifier": "ParameterError", "path": "src/pyscipopt_ml/exceptions.py", "snippet": "class ParameterError(Exception):\n \"\"\"Wrong parameter to a function.\"\"\"\n\n def __init__(self, message):\n super().__init__(message)" }, { "identifier": "AbstractPredictorConstr", "path": "src/pyscipopt_ml/modelling/base_predictor_constraint.py", "snippet": "class AbstractPredictorConstr(ABC):\n \"\"\"Base class to store all information of embedded ML model by :py:func`pyscipopt_ml.add_predictor_constr`.\n\n This class is the base class to store everything that is added to\n a SCIP model when a trained predictor is inserted into it. Depending on\n the type of the predictor, a class derived from it will be returned\n by :py:func:`pyscipopt_ml.add_predictor_constr`.\n\n Warning\n -------\n\n Users should usually never construct objects of this class or one of its derived\n classes. They are returned by the :py:func:`pyscipopt_ml.add_predictor_constr` and\n other functions.\n \"\"\"\n\n def __init__(\n self, scip_model, input_vars, output_vars=None, unique_naming_prefix=\"\", **kwargs\n ):\n self.scip_model = scip_model\n self.unique_naming_prefix = unique_naming_prefix\n self._validate(input_vars, output_vars)\n self._created_vars = []\n self._created_cons = []\n self._build_predictor_model(**kwargs)\n\n def _validate(self, input_vars, output_vars=None):\n \"\"\"Validate input and output variables (check shapes, reshape if needed).\"\"\"\n\n # Ensure the correct type of input and output is given\n if type(input_vars) not in [list, np.ndarray]:\n raise ParameterError(\n f\"Input variables are not type list or np.ndarray. They are type {type(input_vars)}.\"\n )\n if output_vars is not None:\n if not isinstance(output_vars, list) and not isinstance(output_vars, np.ndarray):\n raise ParameterError(\n f\"Output variables are not type list or np.ndarray. They are type {type(output_vars)}.\"\n )\n\n # Transform the type list to type np.ndarray\n if isinstance(input_vars, list):\n input_vars = np.array(input_vars, dtype=object)\n if isinstance(output_vars, list):\n output_vars = np.array(output_vars, dtype=object)\n\n # Change the dimension of the input variables if needed. 
(Always want number of data points first)\n if input_vars.ndim == 1:\n input_vars = input_vars.reshape((1, -1))\n if input_vars.ndim >= 3:\n input_vars = input_vars.reshape((input_vars.shape[0], -1))\n\n # In the case of the output being None, create the appropriate output variables here\n if output_vars is None:\n output_vars = self._create_output_vars(input_vars)\n\n # Change the dimensions of the output variables if needed (Always want the number of data points first)\n if output_vars.ndim == 1:\n if input_vars.shape[0] == 1:\n output_vars = output_vars.reshape((1, -1))\n else:\n output_vars = output_vars.reshape((-1, 1))\n\n # Ensure that the variable dimensions match that of the predictor\n if hasattr(self, \"input_size\") and input_vars.shape[-1] != self.input_size:\n raise ParameterError(\n f\"Input variables dimension don't conform with predictor {type(self)} \"\n + f\"Input variable dimensions: {input_vars.shape[-1]} != {self.input_size}\"\n )\n\n if hasattr(self, \"output_size\") and output_vars.shape[-1] != self.output_size:\n raise ParameterError(\n f\"Output variable dimensions don't conform with predictor {type(self)} \"\n + f\"Output variable dimensions: {output_vars.shape[-1]} != {self.output_size}\"\n )\n\n if output_vars.shape[0] != input_vars.shape[0]:\n raise ParameterError(\n \"Non-conforming dimension between input variables and output variables: \"\n + f\"{output_vars.shape[0]} != {input_vars.shape[0]}\"\n )\n\n self._input = input_vars\n self._output = output_vars\n\n def _build_predictor_model(self, **kwargs):\n self._mip_model(**kwargs)\n\n def print_stats(self, file=None):\n \"\"\"Print statistics on model additions stored by this class.\n\n This function prints detailed statistics on the variables\n and constraints that were added to the model.\n\n Arguments\n ---------\n\n file: None, optional\n Text stream to which output should be redirected. 
By default, this is sys.stdout.\n \"\"\"\n\n n_indicator_cons = 0\n n_sos_cons = 0\n n_linear_cons = 0\n\n created_cons = self._created_cons\n created_vars = self._created_vars\n if hasattr(self, \"_estimators\"):\n for estimator in self._estimators:\n created_cons += estimator._created_cons\n created_vars += estimator._created_vars\n if hasattr(self, \"_layers\"):\n for layer in self._layers:\n created_cons += layer._created_cons\n created_vars += layer._created_vars\n for cons_set in created_cons:\n it = np.nditer(cons_set, flags=[\"multi_index\", \"refs_ok\"])\n for _ in it:\n if isinstance(cons_set[it.multi_index], Constraint):\n cons_type = cons_set[it.multi_index].getConshdlrName()\n if cons_type == \"indicator\":\n n_indicator_cons += 1\n elif cons_type == \"SOS1\":\n n_sos_cons += 1\n elif cons_type == \"linear\":\n n_linear_cons += 1\n else:\n raise TypeError(\n f\"Cons {cons_set[it.multi_index]} is of unknown type {cons_type}\"\n )\n\n n_bin_vars = 0\n n_cont_vars = 0\n\n for var_set in created_vars:\n it = np.nditer(var_set, flags=[\"multi_index\", \"refs_ok\"])\n for _ in it:\n if isinstance(var_set[it.multi_index], Variable):\n var_type = var_set[it.multi_index].vtype()\n if var_type == \"BINARY\":\n n_bin_vars += 1\n elif var_type == \"CONTINUOUS\":\n n_cont_vars += 1\n else:\n raise TypeError(\n f\"Var {var_set[it.multi_index]} is of unknown type {var_type}\"\n )\n\n print(\n f\"Constraints created:\\n Linear {n_linear_cons}\\n Indicator {n_indicator_cons}\\n \"\n f\"SOS1 {n_sos_cons}\\n\"\n f\"Created (internal) variables:\\n Binary {n_bin_vars}\\n Continuous {n_cont_vars}\\n\"\n f\"Input Shape: {self.input.shape}\\nOutput Shape: {self.output.shape}\",\n file=file,\n )\n\n def _create_output_vars(self, input_vars):\n \"\"\"May be defined in derived class to create the output variables of predictor.\"\"\"\n if (not hasattr(self, \"_output\") or self._output is None) and (\n not hasattr(self, \"output_size\") or self.output_size is None\n ):\n raise AttributeError\n\n if not hasattr(self, \"_output\") or self._output is None:\n if hasattr(self, \"classification\"):\n if self.classification:\n vtype = \"B\"\n else:\n vtype = \"C\"\n else:\n vtype = \"C\"\n output_vars = create_vars(\n self.scip_model,\n (input_vars.shape[0], self.output_size),\n vtype,\n lb=None,\n ub=None,\n name_prefix=\"out\",\n )\n return output_vars\n else:\n return self._output\n\n @property\n def _has_solution(self):\n \"\"\"Returns true if we have a solution.\"\"\"\n if self.scip_model.getNSols() > 0:\n return True\n return False\n\n @abstractmethod\n def get_error(self, eps):\n \"\"\"Returns error in SCIP's solution with respect to prediction from input.\n\n Returns\n -------\n error : ndarray of same shape as\n :py:attr:`pyscipopt_ml.modelling.base_predictor_constr.AbstractPredictorConstr.output`\n Assuming that we have a solution for the input and output variables\n `x, y`. Returns the absolute value of the differences between `predictor.predict(x)` and\n `y`. 
Where predictor is the regression / classification model represented by this object.\n\n Raises\n ------\n NoSolution\n If the SCIP model has no solution (either was not optimized or is infeasible).\n \"\"\"\n ...\n\n @abstractmethod\n def _mip_model(self, **kwargs):\n \"\"\"Makes MIP model for the predictor.\"\"\"\n ...\n\n @property\n def input(self):\n \"\"\"Returns the input variables of embedded predictor.\n\n Returns\n -------\n output : np.ndarray\n \"\"\"\n return self._input\n\n @property\n def output(self):\n \"\"\"Output variables of embedded predictor.\n\n Returns\n -------\n output : np.ndarray\n \"\"\"\n return self._output\n\n @property\n def input_values(self):\n \"\"\"Returns the values for the input variables if a solution is known.\n\n Returns\n -------\n input_vals : np.ndarray\n\n Raises\n ------\n NoSolution\n If SCIP has no solution (either was not optimized or is infeasible).\n \"\"\"\n if not self._has_solution:\n raise NoSolution\n\n input_vals = np.zeros(self.input.shape)\n for i in range(self.input.shape[0]):\n for j in range(self.input.shape[1]):\n input_vals[i][j] = self.scip_model.getVal(self.input[i][j])\n\n return input_vals\n\n @property\n def output_values(self):\n \"\"\"Returns the values for the output variables if a solution is known.\n\n Returns\n -------\n output_value : np.ndarray\n\n Raises\n ------\n NoSolution\n If SCIP has no solution (either was not optimized or is infeasible).\n \"\"\"\n if not self._has_solution:\n raise NoSolution\n\n output_vals = np.zeros(self.output.shape)\n for i in range(self.output.shape[0]):\n for j in range(self.output.shape[1]):\n output_vals[i][j] = self.scip_model.getVal(self.output[i][j])\n\n return output_vals\n\n def __str__(self):\n return self._name" }, { "identifier": "create_vars", "path": "src/pyscipopt_ml/modelling/var_utils.py", "snippet": "def create_vars(scip_model, shape, vtype, lb=None, ub=None, name_prefix=\"\"):\n \"\"\"\n Create PySCIPOpt variables in a numpy.ndarray of a given shape.\n\n Parameters\n ----------\n scip_model : PySCIPOpt Model\n The SCIP Model where the predictor should be inserted.\n shape : tuple\n The shape of the numpy array that will be constructed\n vtype : 'C' | 'B' | 'I'\n Whether the variables will be continuous, binary, or integer\n lb : float or int or None, optional\n The lower bound of the variables\n ub : float or int or None, optional\n The upper bound of the variables\n name_prefix : str, optional\n The naming prefix used for these variables\n\n Returns\n -------\n scip_vars : np.ndarray\n A np.ndarray with shape (shape) that contains uniquely names variables all of which are the specified type\n \"\"\"\n\n scip_vars = np.zeros(shape, dtype=object)\n it = np.nditer(scip_vars, flags=[\"multi_index\", \"refs_ok\"])\n for _ in it:\n idx_list = str(it.multi_index).strip(\")\").strip(\"(\").split(\",\")\n idx_string = \"\"\n for idx in idx_list:\n if idx == \"\":\n continue\n idx_string += f\"_{int(idx)}\"\n name = name_prefix + idx_string\n scip_vars[it.multi_index] = scip_model.addVar(vtype=vtype, lb=lb, ub=ub, name=name)\n return scip_vars" }, { "identifier": "add_identity_activation_constraint_layer", "path": "src/pyscipopt_ml/modelling/neuralnet/activations.py", "snippet": "def add_identity_activation_constraint_layer(layer):\n \"\"\"\n MIP model for identity activation on a layer\n\n Parameters\n ----------\n layer : AbstractNNLayer\n Layer to which activation is applied.\n\n Returns\n -------\n\n affine_cons : np.ndarray\n A numpy array containing the linear 
transformation constraints\n\n \"\"\"\n\n n_samples = layer.input.shape[0]\n n_nodes_left = layer.input.shape[-1]\n n_nodes_right = layer.output.shape[-1]\n affine_cons = np.zeros((n_samples, n_nodes_right), dtype=object)\n\n # Perform some basic activity based bound propagation\n propagation_success, lbs, ubs = propagate_identity_bounds(\n layer, n_samples, n_nodes_left, n_nodes_right, False\n )\n\n for i in range(n_samples):\n for j in range(n_nodes_right):\n rhs = (\n quicksum(layer.coefs[k][j] * layer.input[i][k] for k in range(n_nodes_left))\n + layer.intercept[j]\n )\n name = layer.unique_naming_prefix + f\"affine_{i}_{j}\"\n affine_cons[i][j] = layer.scip_model.addCons(layer.output[i][j] == rhs, name=name)\n # Propagate bounds\n if propagation_success:\n if abs(lbs[i][j]) < 10**5:\n output_lb = layer.output[i][j].getLbOriginal()\n layer.scip_model.chgVarLb(layer.output[i][j], max(lbs[i][j], output_lb))\n if abs(ubs[i][j]) < 10**5:\n output_ub = layer.output[i][j].getUbOriginal()\n layer.scip_model.chgVarUb(layer.output[i][j], min(ubs[i][j], output_ub))\n\n return affine_cons" }, { "identifier": "add_relu_activation_constraint_layer", "path": "src/pyscipopt_ml/modelling/neuralnet/activations.py", "snippet": "def add_relu_activation_constraint_layer(layer, slack, activation_only=True):\n \"\"\"\n MIP model for ReLU activation on a layer\n\n Parameters\n ----------\n layer : AbstractNNLayer\n Layer to which activation is applied.\n\n slack : np.ndarray\n Slack variables that will be used in the SOS formulation\n\n activation_only : bool, optional\n Whether this layer should only feature as an activation layer, i.e., skip the affine transformation\n\n Returns\n -------\n\n cons_with_slack : np.ndarray\n A numpy array containing added constraints\n\n sos_cons : np.ndarray\n A numpy array containing added constraints\n\n \"\"\"\n\n # Initialise values for easy access and create empty constraint arrays\n n_samples = layer.input.shape[0]\n n_nodes_left = layer.input.shape[-1]\n n_nodes_right = layer.output.shape[-1]\n sos_cons = np.zeros((n_samples, n_nodes_right), dtype=object)\n cons_with_slack = np.zeros((n_samples, n_nodes_right), dtype=object)\n\n # Perform some basic activity based bound propagation\n propagation_success, lbs, ubs = propagate_identity_bounds(\n layer, n_samples, n_nodes_left, n_nodes_right, activation_only\n )\n\n # Iterate over all nodes on the right hand side and create the appropriate constraints\n for i in range(n_samples):\n for j in range(n_nodes_right):\n if layer.output[i][j].getLbOriginal() < 0:\n layer.scip_model.chgVarLb(layer.output[i][j], 0)\n name = layer.unique_naming_prefix + f\"slack_{i}_{j}\"\n if activation_only:\n cons_with_slack[i][j] = layer.scip_model.addCons(\n layer.output[i][j] == layer.input[i][j] + slack[i][j], name=name\n )\n else:\n rhs = quicksum(layer.coefs[k][j] * layer.input[i][k] for k in range(n_nodes_left))\n rhs += layer.intercept[j] + slack[i][j]\n cons_with_slack[i][j] = layer.scip_model.addCons(\n layer.output[i][j] == rhs, name=name\n )\n # Propagate bounds\n if propagation_success:\n if abs(lbs[i][j]) < 10**5:\n output_lb = layer.output[i][j].getLbOriginal()\n layer.scip_model.chgVarLb(\n layer.output[i][j], max(max(lbs[i][j], 0), output_lb)\n )\n layer.scip_model.chgVarUb(slack[i][j], max(-lbs[i][j], 0))\n if abs(ubs[i][j]) < 10**5:\n output_ub = layer.output[i][j].getUbOriginal()\n layer.scip_model.chgVarUb(\n layer.output[i][j], min(max(ubs[i][j], 0), output_ub)\n )\n layer.scip_model.chgVarLb(slack[i][j], max(-ubs[i][j], 
0))\n name = layer.unique_naming_prefix + f\"sos_{i}_{j}\"\n sos_cons[i][j] = layer.scip_model.addConsSOS1(\n [layer.output[i][j], slack[i][j]], name=name\n )\n\n return cons_with_slack, sos_cons" }, { "identifier": "add_sigmoid_activation_constraint_layer", "path": "src/pyscipopt_ml/modelling/neuralnet/activations.py", "snippet": "def add_sigmoid_activation_constraint_layer(layer, activation_only=True):\n \"\"\"\n MIP model for Sigmoid activation on a layer\n\n Parameters\n ----------\n layer : AbstractNNLayer\n Layer to which activation is applied.\n\n activation_only : bool, optional\n Whether this layer should only feature as an activation layer, i.e., skip the affine transformation\n\n Returns\n -------\n\n sigmoid_cons : np.ndarray\n A numpy array containing added constraints\n\n \"\"\"\n\n # Initialise values for easy access and create empty constraint arrays\n n_samples = layer.input.shape[0]\n n_nodes_left = layer.input.shape[-1]\n n_nodes_right = layer.output.shape[-1]\n sigmoid_cons = np.zeros((n_samples, n_nodes_right), dtype=object)\n\n # Iterate over all nodes on the right hand side and create the appropriate constraints\n for i in range(n_samples):\n for j in range(n_nodes_right):\n if layer.output[i][j].getLbOriginal() < 0:\n layer.scip_model.chgVarLb(layer.output[i][j], 0)\n if layer.output[i][j].getUbOriginal() > 1:\n layer.scip_model.chgVarUb(layer.output[i][j], 1)\n if activation_only:\n x = layer.input[i][j]\n else:\n x = (\n quicksum(layer.coefs[k][j] * layer.input[i][k] for k in range(n_nodes_left))\n + layer.intercept[j]\n )\n name = layer.unique_naming_prefix + f\"sigmoid_{i}_{j}\"\n sigmoid_cons[i][j] = layer.scip_model.addCons(\n layer.output[i][j] == 1 / (1 + exp(-x)), name=name\n )\n\n return sigmoid_cons" }, { "identifier": "add_tanh_activation_constraint_layer", "path": "src/pyscipopt_ml/modelling/neuralnet/activations.py", "snippet": "def add_tanh_activation_constraint_layer(layer, activation_only=True):\n \"\"\"\n MIP model for tanh activation on a layer\n\n Parameters\n ----------\n layer : AbstractNNLayer\n Layer to which activation is applied.\n\n activation_only : bool, optional\n Whether this layer should only feature as an activation layer, i.e., skip the affine transformation\n\n Returns\n -------\n\n tanh_cons : np.ndarray\n A numpy array containing added constraints\n\n \"\"\"\n\n # Initialise values for easy access and create empty constraint arrays\n n_samples = layer.input.shape[0]\n n_nodes_left = layer.input.shape[-1]\n n_nodes_right = layer.output.shape[-1]\n tanh_cons = np.zeros((n_samples, n_nodes_right), dtype=object)\n\n # Iterate over all nodes on the right hand side and create the appropriate constraints\n for i in range(n_samples):\n for j in range(n_nodes_right):\n if layer.output[i][j].getLbOriginal() < -1:\n layer.scip_model.chgVarLb(layer.output[i][j], -1)\n if layer.output[i][j].getUbOriginal() > 1:\n layer.scip_model.chgVarUb(layer.output[i][j], 1)\n if activation_only:\n x = layer.input[i][j]\n else:\n x = (\n quicksum(layer.coefs[k][j] * layer.input[i][k] for k in range(n_nodes_left))\n + layer.intercept[j]\n )\n name = layer.unique_naming_prefix + f\"tanh_{i}_{j}\"\n tanh_cons[i][j] = layer.scip_model.addCons(\n layer.output[i][j] == (1 - exp(-2 * x)) / (1 + exp(-2 * x)), name=name\n )\n\n return tanh_cons" } ]
from ...exceptions import ParameterError from ..base_predictor_constraint import AbstractPredictorConstr from ..var_utils import create_vars from .activations import ( add_identity_activation_constraint_layer, add_relu_activation_constraint_layer, add_sigmoid_activation_constraint_layer, add_tanh_activation_constraint_layer, )
6,148
raise AssertionError("Cannot compute the error of an individual layer") class ActivationLayer(AbstractNNLayer): """Class to build one activation layer of a neural network.""" def __init__( self, scip_model, output_vars, input_vars, activation_function, unique_naming_prefix, **kwargs, ): super().__init__( scip_model, input_vars, output_vars, activation_function, unique_naming_prefix, **kwargs, ) def _create_output_vars(self, input_vars): output_vars = create_vars( input_vars.shape, vtype="C", lb=None, ub=None, name_prefix=self.unique_naming_prefix + "output", ) return output_vars def _mip_model(self, **kwargs): """Add the layer to model.""" if self.activation == "relu": slack = create_vars( self.scip_model, (self.input.shape[0], self.output.shape[-1]), vtype="C", lb=0.0, ub=None, name_prefix=self.unique_naming_prefix + "slack", ) affine_slack_cons, sos_cons = add_relu_activation_constraint_layer( self, slack, activation_only=True ) self._created_vars.append(slack) self._created_cons.append(affine_slack_cons) self._created_cons.append(sos_cons) elif self.activation == "logistic": sigmoid_cons = add_sigmoid_activation_constraint_layer(self, activation_only=True) self._created_cons.append(sigmoid_cons) elif self.activation == "tanh": tanh_cons = add_tanh_activation_constraint_layer(self, activation_only=True) self._created_cons.append(tanh_cons) else: raise ParameterError(f"Activation layer of type {self.activation} shouldn't exist") class DenseLayer(AbstractNNLayer): """Class to build one layer of a neural network.""" def __init__( self, scip_model, input_vars, layer_coefs, layer_intercept, output_vars, activation_function, unique_naming_prefix, **kwargs, ): self.coefs = layer_coefs self.intercept = layer_intercept super().__init__( scip_model, input_vars, output_vars, activation_function, unique_naming_prefix, **kwargs, ) def _create_output_vars(self, input_vars): output_vars = create_vars( (input_vars.shape[0], self.coefs.shape[-1]), vtype="C", lb=None, ub=None, name_prefix=self.unique_naming_prefix + "output", ) return output_vars def _mip_model(self, **kwargs): """Add the layer to model.""" if self.activation == "relu": slack = create_vars( self.scip_model, (self.input.shape[0], self.output.shape[-1]), vtype="C", lb=0.0, ub=None, name_prefix=self.unique_naming_prefix + "slack", ) affine_slack_cons, sos_cons = add_relu_activation_constraint_layer( self, slack, activation_only=False ) self._created_vars.append(slack) self._created_cons.append(affine_slack_cons) self._created_cons.append(sos_cons) elif self.activation == "logistic": sigmoid_cons = add_sigmoid_activation_constraint_layer(self, activation_only=False) self._created_cons.append(sigmoid_cons) elif self.activation == "tanh": tanh_cons = add_tanh_activation_constraint_layer(self, activation_only=False) self._created_cons.append(tanh_cons) elif self.activation == "identity":
"""Bases classes for modeling neural network layers.""" class AbstractNNLayer(AbstractPredictorConstr): """Abstract class for NN layers.""" def __init__( self, scip_model, input_vars, output_vars, activation, unique_naming_prefix, **kwargs, ): self.activation = activation AbstractPredictorConstr.__init__( self, scip_model, input_vars, output_vars, unique_naming_prefix, **kwargs ) def get_error(self, eps=None): # We can't compute externally the error of a layer raise AssertionError("Cannot compute the error of an individual layer") class ActivationLayer(AbstractNNLayer): """Class to build one activation layer of a neural network.""" def __init__( self, scip_model, output_vars, input_vars, activation_function, unique_naming_prefix, **kwargs, ): super().__init__( scip_model, input_vars, output_vars, activation_function, unique_naming_prefix, **kwargs, ) def _create_output_vars(self, input_vars): output_vars = create_vars( input_vars.shape, vtype="C", lb=None, ub=None, name_prefix=self.unique_naming_prefix + "output", ) return output_vars def _mip_model(self, **kwargs): """Add the layer to model.""" if self.activation == "relu": slack = create_vars( self.scip_model, (self.input.shape[0], self.output.shape[-1]), vtype="C", lb=0.0, ub=None, name_prefix=self.unique_naming_prefix + "slack", ) affine_slack_cons, sos_cons = add_relu_activation_constraint_layer( self, slack, activation_only=True ) self._created_vars.append(slack) self._created_cons.append(affine_slack_cons) self._created_cons.append(sos_cons) elif self.activation == "logistic": sigmoid_cons = add_sigmoid_activation_constraint_layer(self, activation_only=True) self._created_cons.append(sigmoid_cons) elif self.activation == "tanh": tanh_cons = add_tanh_activation_constraint_layer(self, activation_only=True) self._created_cons.append(tanh_cons) else: raise ParameterError(f"Activation layer of type {self.activation} shouldn't exist") class DenseLayer(AbstractNNLayer): """Class to build one layer of a neural network.""" def __init__( self, scip_model, input_vars, layer_coefs, layer_intercept, output_vars, activation_function, unique_naming_prefix, **kwargs, ): self.coefs = layer_coefs self.intercept = layer_intercept super().__init__( scip_model, input_vars, output_vars, activation_function, unique_naming_prefix, **kwargs, ) def _create_output_vars(self, input_vars): output_vars = create_vars( (input_vars.shape[0], self.coefs.shape[-1]), vtype="C", lb=None, ub=None, name_prefix=self.unique_naming_prefix + "output", ) return output_vars def _mip_model(self, **kwargs): """Add the layer to model.""" if self.activation == "relu": slack = create_vars( self.scip_model, (self.input.shape[0], self.output.shape[-1]), vtype="C", lb=0.0, ub=None, name_prefix=self.unique_naming_prefix + "slack", ) affine_slack_cons, sos_cons = add_relu_activation_constraint_layer( self, slack, activation_only=False ) self._created_vars.append(slack) self._created_cons.append(affine_slack_cons) self._created_cons.append(sos_cons) elif self.activation == "logistic": sigmoid_cons = add_sigmoid_activation_constraint_layer(self, activation_only=False) self._created_cons.append(sigmoid_cons) elif self.activation == "tanh": tanh_cons = add_tanh_activation_constraint_layer(self, activation_only=False) self._created_cons.append(tanh_cons) elif self.activation == "identity":
affine_cons = add_identity_activation_constraint_layer(self)
3
2023-12-10 20:28:22+00:00
8k
Yanyutin753/CowAndPandoraNext
plugins/godcmd/godcmd.py
[ { "identifier": "Bridge", "path": "bridge/bridge.py", "snippet": "class Bridge(object):\n def __init__(self):\n self.btype = {\n \"chat\": const.CHATGPT,\n \"voice_to_text\": conf().get(\"voice_to_text\", \"openai\"),\n \"text_to_voice\": conf().get(\"text_to_voice\", \"google\"),\n \"translate\": conf().get(\"translate\", \"baidu\"),\n }\n model_type = conf().get(\"model\")\n if model_type in [\"text-davinci-003\"]:\n self.btype[\"chat\"] = const.OPEN_AI\n if conf().get(\"use_azure_chatgpt\", False):\n self.btype[\"chat\"] = const.CHATGPTONAZURE\n if model_type in [\"wenxin\"]:\n self.btype[\"chat\"] = const.BAIDU\n if model_type in [\"xunfei\"]:\n self.btype[\"chat\"] = const.XUNFEI\n if conf().get(\"use_linkai\") and conf().get(\"linkai_api_key\"):\n self.btype[\"chat\"] = const.LINKAI\n self.bots = {}\n\n def get_bot(self, typename):\n if self.bots.get(typename) is None:\n logger.info(\"create bot {} for {}\".format(self.btype[typename], typename))\n if typename == \"text_to_voice\":\n self.bots[typename] = create_voice(self.btype[typename])\n elif typename == \"voice_to_text\":\n self.bots[typename] = create_voice(self.btype[typename])\n elif typename == \"chat\":\n self.bots[typename] = create_bot(self.btype[typename])\n elif typename == \"translate\":\n self.bots[typename] = create_translator(self.btype[typename])\n return self.bots[typename]\n\n def get_bot_type(self, typename):\n return self.btype[typename]\n\n def fetch_reply_content(self, query, context: Context) -> Reply:\n return self.get_bot(\"chat\").reply(query, context)\n\n def fetch_voice_to_text(self, voiceFile) -> Reply:\n return self.get_bot(\"voice_to_text\").voiceToText(voiceFile)\n\n def fetch_text_to_voice(self, text) -> Reply:\n return self.get_bot(\"text_to_voice\").textToVoice(text)\n\n def fetch_translate(self, text, from_lang=\"\", to_lang=\"en\") -> Reply:\n return self.get_bot(\"translate\").translate(text, from_lang, to_lang)\n\n def reset_bot(self):\n \"\"\"\n 重置bot路由\n \"\"\"\n self.__init__()" }, { "identifier": "ContextType", "path": "bridge/context.py", "snippet": "class ContextType(Enum):\n TEXT = 1 # 文本消息\n VOICE = 2 # 音频消息\n IMAGE = 3 # 图片消息\n IMAGE_CREATE = 10 # 创建图片命令\n JOIN_GROUP = 20 # 加入群聊\n PATPAT = 21 # 拍了拍\n\n def __str__(self):\n return self.name" }, { "identifier": "Reply", "path": "bridge/reply.py", "snippet": "class Reply:\n def __init__(self, type: ReplyType = None, content=None):\n self.type = type\n self.content = content\n\n def __str__(self):\n return \"Reply(type={}, content={})\".format(self.type, self.content)" }, { "identifier": "ReplyType", "path": "bridge/reply.py", "snippet": "class ReplyType(Enum):\n TEXT = 1 # 文本\n VOICE = 2 # 音频文件\n IMAGE = 3 # 图片文件\n IMAGE_URL = 4 # 图片URL\n\n INFO = 9\n ERROR = 10\n\n def __str__(self):\n return self.name" }, { "identifier": "const", "path": "common/const.py", "snippet": "OPEN_AI = \"openAI\"\nCHATGPT = \"chatGPT\"\nBAIDU = \"baidu\"\nXUNFEI = \"xunfei\"\nCHATGPTONAZURE = \"chatGPTOnAzure\"\nLINKAI = \"linkai\"\nVERSION = \"1.3.0\"\nMODEL_LIST = [\"gpt-3.5-turbo\", \"gpt-3.5-turbo-16k\", \"gpt-4\", \"wenxin\", \"xunfei\"]" }, { "identifier": "load_config", "path": "config.py", "snippet": "class Config(dict):\n def __init__(self, d=None):\n def __getitem__(self, key):\n def __setitem__(self, key, value):\n def get(self, key, default=None):\n def get_user_data(self, user) -> dict:\n def load_user_datas(self):\n def save_user_datas(self):\ndef load_config():\ndef get_root():\ndef read_file(path):\ndef conf():\ndef get_appdata_dir():\ndef 
subscribe_msg():\ndef write_plugin_config(pconf: dict):\ndef pconf(plugin_name: str) -> dict:" }, { "identifier": "Share_token_config", "path": "auto_share_token.py", "snippet": "def Share_token_config():\n # 获取当前代码文件的绝对路径\n current_dir = os.path.dirname(os.path.abspath(__file__))\n\n # 创建指向config.json的相对路径\n config_path = os.path.join(current_dir, 'plugins', '..', 'config.json')\n\n # 打开并读取config.json文件\n with open(config_path, 'r') as file:\n config_data = json.load(file)\n TokensTool_url = config_data[\"TokensTool_url\"]\n\n resp = requests.get(TokensTool_url)\n if resp.status_code == 200:\n pool_token = resp.json()['data']\n print('share token: {}'.format(pool_token))\n else:\n err_str = resp.text.replace('\\n', '').replace('\\r', '').strip()\n print('share token failed: {}'.format(err_str))\n pool_token = err_str\n\n if re.match(r'^(fk-|pk-)', pool_token):\n\n with open(config_path, 'r') as file:\n config_data = json.load(file)\n\n config_data[\"open_ai_api_key\"] = pool_token\n\n # 将更新后的内容写回config.json文件\n with open(config_path, 'w') as file:\n json.dump(config_data, file, indent=4, ensure_ascii=False)\n\n print(f\"open_ai_api_key has been updated to: {pool_token}\")\n\n return pool_token" } ]
import random import string import plugins from typing import Tuple from bridge.bridge import Bridge from bridge.context import ContextType from bridge.reply import Reply, ReplyType from common import const from config import load_config, global_config from plugins import * from auto_share_token import Share_token_config from custom_instructions import get_messages, modify_messages_user, modify_messages_model
4,688
reply = Reply() reply.type = ReplyType.ERROR reply.content = f"空指令,输入#help查看指令列表\n" e_context["reply"] = reply e_context.action = EventAction.BREAK_PASS return # msg = e_context['context']['msg'] channel = e_context["channel"] user = e_context["context"]["receiver"] session_id = e_context["context"]["session_id"] isgroup = e_context["context"].get("isgroup", False) bottype = Bridge().get_bot_type("chat") bot = Bridge().get_bot("chat") # 将命令和参数分割 content = content[1:].strip() # 移除前缀 command_parts = content.split(maxsplit=1) cmd = command_parts[0] # 检查是否有参数 if len(command_parts) > 1: if cmd == 'update_ci_user' or cmd == 'update_ci_model': args = [command_parts[1]] # 使用剩余的内容作为参数 else: args = command_parts[1].split() # 用空格分割参数 else: args = [] # 没有参数 isadmin = False if user in self.admin_users: isadmin = True ok = False result = "string" if any(cmd in info["alias"] for info in COMMANDS.values()): cmd = next(c for c, info in COMMANDS.items() if cmd in info["alias"]) if cmd == "auth": ok, result = self.authenticate(user, args, isadmin, isgroup) elif cmd == "help" or cmd == "helpp": if len(args) == 0: ok, result = True, get_help_text(isadmin, isgroup) else: # This can replace the helpp command plugins = PluginManager().list_plugins() query_name = args[0].upper() # search name and namecn for name, plugincls in plugins.items(): if not plugincls.enabled: continue if query_name == name or query_name == plugincls.namecn: ok, result = True, PluginManager().instances[name].get_help_text(isgroup=isgroup, isadmin=isadmin, verbose=True) break if not ok: result = "插件不存在或未启用" elif cmd == "model": if not isadmin and not self.is_admin_in_group(e_context["context"]): ok, result = False, "需要管理员权限执行" elif len(args) == 0: ok, result = True, "当前模型为: " + str(conf().get("model")) elif len(args) == 1: if args[0] not in const.MODEL_LIST: ok, result = False, "模型名称不存在" else: conf()["model"] = args[0] Bridge().reset_bot() ok, result = True, "模型设置为: " + str(conf().get("model")) elif cmd == "id": ok, result = True, user elif cmd == "set_openai_api_key": if len(args) == 1: user_data = conf().get_user_data(user) user_data["openai_api_key"] = args[0] ok, result = True, "你的OpenAI私有api_key已设置为" + args[0] else: ok, result = False, "请提供一个api_key" elif cmd == "reset_openai_api_key": try: user_data = conf().get_user_data(user) user_data.pop("openai_api_key") ok, result = True, "你的OpenAI私有api_key已清除" except Exception as e: ok, result = False, "你没有设置私有api_key" elif cmd == "set_gpt_model": if len(args) == 1: user_data = conf().get_user_data(user) user_data["gpt_model"] = args[0] ok, result = True, "你的GPT模型已设置为" + args[0] else: ok, result = False, "请提供一个GPT模型" elif cmd == "gpt_model": user_data = conf().get_user_data(user) model = conf().get("model") if "gpt_model" in user_data: model = user_data["gpt_model"] ok, result = True, "你的GPT模型为" + str(model) elif cmd == "reset_gpt_model": try: user_data = conf().get_user_data(user) user_data.pop("gpt_model") ok, result = True, "你的GPT模型已重置" except Exception as e: ok, result = False, "你没有设置私有GPT模型" elif cmd == "reset": if bottype in [const.OPEN_AI, const.CHATGPT, const.CHATGPTONAZURE, const.LINKAI, const.BAIDU, const.XUNFEI]: bot.sessions.clear_session(session_id) channel.cancel_session(session_id) ok, result = True, "会话已重置" else: ok, result = False, "当前对话机器人不支持重置会话" logger.debug("[Godcmd] command: %s by %s" % (cmd, user)) elif any(cmd in info["alias"] for info in ADMIN_COMMANDS.values()): if isadmin: if isgroup: ok, result = False, "群聊不可执行管理员指令" else: cmd = next(c for c, info in 
ADMIN_COMMANDS.items() if cmd in info["alias"]) if cmd == "stop": self.isrunning = False ok, result = True, "服务已暂停" elif cmd == "resume": self.isrunning = True ok, result = True, "服务已恢复" elif cmd == "update_token": print("开始修改分享token")
# encoding:utf-8 # 定义指令集 COMMANDS = { "help": { "alias": ["help", "帮助"], "desc": "回复此帮助", }, "helpp": { "alias": ["help", "帮助"], # 与help指令共用别名,根据参数数量区分 "args": ["插件名"], "desc": "回复指定插件的详细帮助", }, "auth": { "alias": ["auth", "认证"], "args": ["口令"], "desc": "管理员认证", }, "model": { "alias": ["model", "模型"], "desc": "查看和设置全局模型", }, "set_openai_api_key": { "alias": ["set_openai_api_key"], "args": ["api_key"], "desc": "设置你的OpenAI私有api_key", }, "reset_openai_api_key": { "alias": ["reset_openai_api_key"], "desc": "重置为默认的api_key", }, "set_gpt_model": { "alias": ["set_gpt_model"], "desc": "设置你的私有模型", }, "reset_gpt_model": { "alias": ["reset_gpt_model"], "desc": "重置你的私有模型", }, "gpt_model": { "alias": ["gpt_model"], "desc": "查询你使用的模型", }, "id": { "alias": ["id", "用户"], "desc": "获取用户id", # wechaty和wechatmp的用户id不会变化,可用于绑定管理员 }, "reset": { "alias": ["reset", "重置会话"], "desc": "重置会话", }, } ADMIN_COMMANDS = { "resume": { "alias": ["resume", "恢复服务"], "desc": "恢复服务", }, "update_token": { "alias": ["update_token", "更新chatgpt"], "desc": "更新chatgpt", }, "stop": { "alias": ["stop", "暂停服务"], "desc": "暂停服务", }, "reconf": { "alias": ["reconf", "重载配置"], "desc": "重载配置(不包含插件配置)", }, "resetall": { "alias": ["resetall", "重置所有会话"], "desc": "重置所有会话", }, "scanp": { "alias": ["scanp", "扫描插件"], "desc": "扫描插件目录是否有新插件", }, "plist": { "alias": ["plist", "插件"], "desc": "打印当前插件列表", }, "setpri": { "alias": ["setpri", "设置插件优先级"], "args": ["插件名", "优先级"], "desc": "设置指定插件的优先级,越大越优先", }, "reloadp": { "alias": ["reloadp", "重载插件"], "args": ["插件名"], "desc": "重载指定插件配置", }, "enablep": { "alias": ["enablep", "启用插件"], "args": ["插件名"], "desc": "启用指定插件", }, "disablep": { "alias": ["disablep", "禁用插件"], "args": ["插件名"], "desc": "禁用指定插件", }, "installp": { "alias": ["installp", "安装插件"], "args": ["仓库地址或插件名"], "desc": "安装指定插件", }, "uninstallp": { "alias": ["uninstallp", "卸载插件"], "args": ["插件名"], "desc": "卸载指定插件", }, "updatep": { "alias": ["updatep", "更新插件"], "args": ["插件名"], "desc": "更新指定插件", }, "debug": { "alias": ["debug", "调试模式", "DEBUG"], "desc": "开启机器调试日志", }, } # 定义帮助函数 def get_help_text(isadmin, isgroup): help_text = "通用指令:\n" for cmd, info in COMMANDS.items(): if cmd == "auth": # 不提示认证指令 continue if cmd == "id" and conf().get("channel_type", "wx") not in ["wxy", "wechatmp"]: continue alias = ["#" + a for a in info["alias"][:1]] help_text += f"{','.join(alias)} " if "args" in info: args = [a for a in info["args"]] help_text += f"{' '.join(args)}" help_text += f": {info['desc']}\n" # 插件指令 plugins = PluginManager().list_plugins() help_text += "\n目前可用插件有:" for plugin in plugins: if plugins[plugin].enabled and not plugins[plugin].hidden: namecn = plugins[plugin].namecn help_text += "\n%s:" % namecn help_text += PluginManager().instances[plugin].get_help_text(verbose=False).strip() if ADMIN_COMMANDS and isadmin: help_text += "\n\n管理员指令:\n" for cmd, info in ADMIN_COMMANDS.items(): alias = ["#" + a for a in info["alias"][:1]] help_text += f"{','.join(alias)} " if "args" in info: args = [a for a in info["args"]] help_text += f"{' '.join(args)}" help_text += f": {info['desc']}\n" return help_text @plugins.register( name="Godcmd", desire_priority=999, hidden=True, desc="为你的机器人添加指令集,有用户和管理员两种角色,加载顺序请放在首位,初次运行后插件目录会生成配置文件, 填充管理员密码后即可认证", version="1.0", author="lanvent", ) class Godcmd(Plugin): def __init__(self): super().__init__() config_path = os.path.join(os.path.dirname(__file__), "config.json") gconf = super().load_config() if not gconf: if not os.path.exists(config_path): gconf = {"password": "", "admin_users": []} with open(config_path, "w") as f: 
json.dump(gconf, f, indent=4) if gconf["password"] == "": self.temp_password = "".join(random.sample(string.digits, 4)) logger.info("[Godcmd] 因未设置口令,本次的临时口令为%s。" % self.temp_password) else: self.temp_password = None custom_commands = conf().get("clear_memory_commands", []) for custom_command in custom_commands: if custom_command and custom_command.startswith("#"): custom_command = custom_command[1:] if custom_command and custom_command not in COMMANDS["reset"]["alias"]: COMMANDS["reset"]["alias"].append(custom_command) self.password = gconf["password"] self.admin_users = gconf["admin_users"] # 预存的管理员账号,这些账号不需要认证。itchat的用户名每次都会变,不可用 self.isrunning = True # 机器人是否运行中 self.handlers[Event.ON_HANDLE_CONTEXT] = self.on_handle_context logger.info("[Godcmd] inited") def on_handle_context(self, e_context: EventContext): context_type = e_context["context"].type if context_type != ContextType.TEXT: if not self.isrunning: e_context.action = EventAction.BREAK_PASS return content = e_context["context"].content logger.debug("[Godcmd] on_handle_context. content: %s" % content) if content.startswith("#"): if len(content) == 1: reply = Reply() reply.type = ReplyType.ERROR reply.content = f"空指令,输入#help查看指令列表\n" e_context["reply"] = reply e_context.action = EventAction.BREAK_PASS return # msg = e_context['context']['msg'] channel = e_context["channel"] user = e_context["context"]["receiver"] session_id = e_context["context"]["session_id"] isgroup = e_context["context"].get("isgroup", False) bottype = Bridge().get_bot_type("chat") bot = Bridge().get_bot("chat") # 将命令和参数分割 content = content[1:].strip() # 移除前缀 command_parts = content.split(maxsplit=1) cmd = command_parts[0] # 检查是否有参数 if len(command_parts) > 1: if cmd == 'update_ci_user' or cmd == 'update_ci_model': args = [command_parts[1]] # 使用剩余的内容作为参数 else: args = command_parts[1].split() # 用空格分割参数 else: args = [] # 没有参数 isadmin = False if user in self.admin_users: isadmin = True ok = False result = "string" if any(cmd in info["alias"] for info in COMMANDS.values()): cmd = next(c for c, info in COMMANDS.items() if cmd in info["alias"]) if cmd == "auth": ok, result = self.authenticate(user, args, isadmin, isgroup) elif cmd == "help" or cmd == "helpp": if len(args) == 0: ok, result = True, get_help_text(isadmin, isgroup) else: # This can replace the helpp command plugins = PluginManager().list_plugins() query_name = args[0].upper() # search name and namecn for name, plugincls in plugins.items(): if not plugincls.enabled: continue if query_name == name or query_name == plugincls.namecn: ok, result = True, PluginManager().instances[name].get_help_text(isgroup=isgroup, isadmin=isadmin, verbose=True) break if not ok: result = "插件不存在或未启用" elif cmd == "model": if not isadmin and not self.is_admin_in_group(e_context["context"]): ok, result = False, "需要管理员权限执行" elif len(args) == 0: ok, result = True, "当前模型为: " + str(conf().get("model")) elif len(args) == 1: if args[0] not in const.MODEL_LIST: ok, result = False, "模型名称不存在" else: conf()["model"] = args[0] Bridge().reset_bot() ok, result = True, "模型设置为: " + str(conf().get("model")) elif cmd == "id": ok, result = True, user elif cmd == "set_openai_api_key": if len(args) == 1: user_data = conf().get_user_data(user) user_data["openai_api_key"] = args[0] ok, result = True, "你的OpenAI私有api_key已设置为" + args[0] else: ok, result = False, "请提供一个api_key" elif cmd == "reset_openai_api_key": try: user_data = conf().get_user_data(user) user_data.pop("openai_api_key") ok, result = True, "你的OpenAI私有api_key已清除" except Exception as e: ok, 
result = False, "你没有设置私有api_key" elif cmd == "set_gpt_model": if len(args) == 1: user_data = conf().get_user_data(user) user_data["gpt_model"] = args[0] ok, result = True, "你的GPT模型已设置为" + args[0] else: ok, result = False, "请提供一个GPT模型" elif cmd == "gpt_model": user_data = conf().get_user_data(user) model = conf().get("model") if "gpt_model" in user_data: model = user_data["gpt_model"] ok, result = True, "你的GPT模型为" + str(model) elif cmd == "reset_gpt_model": try: user_data = conf().get_user_data(user) user_data.pop("gpt_model") ok, result = True, "你的GPT模型已重置" except Exception as e: ok, result = False, "你没有设置私有GPT模型" elif cmd == "reset": if bottype in [const.OPEN_AI, const.CHATGPT, const.CHATGPTONAZURE, const.LINKAI, const.BAIDU, const.XUNFEI]: bot.sessions.clear_session(session_id) channel.cancel_session(session_id) ok, result = True, "会话已重置" else: ok, result = False, "当前对话机器人不支持重置会话" logger.debug("[Godcmd] command: %s by %s" % (cmd, user)) elif any(cmd in info["alias"] for info in ADMIN_COMMANDS.values()): if isadmin: if isgroup: ok, result = False, "群聊不可执行管理员指令" else: cmd = next(c for c, info in ADMIN_COMMANDS.items() if cmd in info["alias"]) if cmd == "stop": self.isrunning = False ok, result = True, "服务已暂停" elif cmd == "resume": self.isrunning = True ok, result = True, "服务已恢复" elif cmd == "update_token": print("开始修改分享token")
share_token = Share_token_config()
6
2023-12-14 15:21:17+00:00
8k
nerdslab/bams
mouse_triplets.py
[ { "identifier": "Dataset", "path": "bams/data/dataset.py", "snippet": "class Dataset(CachedDataset):\n r\"\"\"Dataset for holding time series data, with input and target features.\n\n Caching is possible if you need to avoid processing the data every time you run\n the script. The cache file will be saved in `cache_path` and will be loaded if\n `cache` is set to True. Be careful when using the cache, as it will not be updated\n if the data changes. Only use once the data processing pipeline is finalized. \n Deleteing the cache file will force the data to be processed again.\n\n Args:\n input_feats (np.ndarray): Array of shape (num_sequences, sequence_len, num_feats).\n Use np.nan for missing values or padding frames.\n target_feats (np.ndarray): Array of shape (num_sequences, sequence_len, num_feats).\n Use np.nan for missing values or padding frames.\n ignore_frames (np.ndarray): Array of shape (num_sequences, sequence_len).\n Use True for missing values or padding frames.\n hoa_bins (int): Number of bins for the histograms of actions.\n hoa_window (int): Window size for the histograms of actions.\n cache_path (str): Path to the cache file.\n cache (bool): Whether to use the cache file.\n \"\"\"\n\n def __init__(\n self,\n input_feats,\n target_feats,\n ignore_frames,\n *,\n hoa_bins=32,\n hoa_window=30,\n cache_path=None,\n cache=False,\n ):\n self.input_feats = input_feats\n self.target_feats = target_feats\n self.ignore_frames = ignore_frames\n\n assert hoa_bins <= 255, \"n_bins must be less than 256, got {}.\".format(hoa_bins)\n self.hoa_bins = hoa_bins\n assert hoa_window <= 255, \"hoa_window must be less than 256, got {}.\".format(\n hoa_window\n )\n self.hoa_window = hoa_window\n cache_path = \"./data/tmp\" if cache_path is None else cache_path\n cache_path = cache_path + f\"_bins{self.hoa_bins}.pkl\"\n\n super().__init__(cache_path, cache)\n\n @staticmethod\n def cache_is_available(cache_path, hoa_bins):\n return os.path.exists(cache_path + f\"_bins{hoa_bins}.pkl\")\n\n def process(self):\n # quantize the target features in order to create the histogram of actions\n bins = np.zeros(\n (self.hoa_bins - 1, self.target_feats.shape[-1]), dtype=np.float32\n )\n quantized_target_feats = np.zeros_like(self.target_feats, dtype=np.uint8)\n\n # pre-compute histogram of actions for target features\n num_feats = self.target_feats.shape[2]\n for i in tqdm(range(num_feats)):\n # find the range of values (low, high) for each feature\n feat = self.target_feats[..., i].flatten() \n feat = feat[~np.isnan(feat)]\n feat = feat[np.abs(feat) > 0.1]\n low, high = np.nanpercentile(feat, [0.5, 99.5])\n\n # compute histogram\n bins[..., i] = np.linspace(low, high, self.hoa_bins - 1)\n quantized_target_feats[..., i] = np.digitize(\n self.target_feats[..., i], bins[..., i]\n ).astype(np.uint8)\n\n # normalize\n self.target_feats[..., i] = self.target_feats[..., i] / np.max(\n [np.abs(low), np.abs(high)]\n )\n\n # normalize input features\n for i in range(self.input_feats.shape[2]):\n # z-score\n self.input_feats[..., i] = self.input_feats[..., i] / np.nanmax(\n np.abs(self.input_feats[..., i])\n )\n\n data = dict(\n input_feats=self.input_feats,\n target_feats=self.target_feats,\n quantized_target_feats=quantized_target_feats,\n ignore_frames=self.ignore_frames,\n )\n return data\n\n def __getitem__(self, item):\n # make histogram of actions\n quantized_target_feat = self.quantized_target_feats[\n item\n ] # shape (sequence_len, num_feats)\n ignore_frames = self.ignore_frames[item] # shape 
(sequence_len,)\n\n rows, cols = np.indices(quantized_target_feat.shape)\n histogram_of_actions = np.zeros(\n (*quantized_target_feat.shape, self.hoa_bins), dtype=np.uint8\n )\n weights = np.zeros_like(self.ignore_frames[item], dtype=np.float32)\n for i in range(1, self.hoa_window + 1):\n histogram_of_actions[rows[:-i], cols[:-i], quantized_target_feat[:-i]] += 1\n weights[:-i] += 1 - self.ignore_frames[item][i:].astype(np.float32)\n\n histogram_of_actions = histogram_of_actions / self.hoa_window\n weights = weights / self.hoa_window\n\n ignore_frames[: -self.hoa_window] = True\n\n data = dict(\n input=self.input_feats[item],\n target_hist=histogram_of_actions,\n ignore_frames=self.ignore_frames[item],\n ignore_weights=weights,\n )\n return data\n\n def __len__(self):\n return self.input_feats.shape[0]\n\n @cached_property\n def input_size(self):\n return self.input_feats.shape[2]\n\n @cached_property\n def target_size(self):\n return self.target_feats.shape[2]" }, { "identifier": "diff", "path": "bams/data/utils.py", "snippet": "def diff(vec, axis=-1, h=1, padding=\"edge\"):\n assert padding in [\n \"zero\",\n \"edge\",\n ], \"Padding must be one of ['zero', 'edge'],\"\n \" got {}.\".format(padding)\n\n # move the target axis to the end\n vec = np.moveaxis(vec, axis, -1)\n\n # compute diff\n dvec = np.zeros_like(vec)\n dvec[..., h:] = vec[..., h:] - vec[..., :-h]\n\n # take care of padding the beginning\n if padding == \"edge\":\n for i in range(h):\n dvec[..., i] = dvec[..., h + 1]\n\n # move the axis back to its original position\n dvec = np.moveaxis(dvec, -1, axis)\n return dvec" }, { "identifier": "to_polar_coordinates", "path": "bams/data/utils.py", "snippet": "def to_polar_coordinates(vec):\n r = np.linalg.norm(vec, axis=-1)\n theta = np.arctan2(vec[..., 1], vec[..., 0])\n return r, theta" }, { "identifier": "angle_clip", "path": "bams/data/utils.py", "snippet": "def angle_clip(theta):\n return np.mod(theta + np.pi, 2 * np.pi) - np.pi" }, { "identifier": "BAMS", "path": "bams/models/bams.py", "snippet": "class BAMS(nn.Module):\n r\"\"\"BAMS model.\n\n Args:\n input_size (int): Number of input features.\n predictor (dict): Parameters for the predictor MLP.\n encoders (dict[dict]): A dictionnary of encoders, where each key is the name of\n the encoder, and each value is a dictionnary of parameters for the encoder.\n Each encoder is a TemporalConvNet.\n \"\"\"\n\n def __init__(\n self,\n input_size,\n *,\n predictor=None,\n **encoder_kwargs,\n ):\n super().__init__()\n\n self.input_size = input_size\n self.representation_size = 0\n\n encoders = dict()\n for name, tcn_kwargs in encoder_kwargs.items():\n assert \"num_inputs\" not in tcn_kwargs\n encoders[name] = TemporalConvNet(num_inputs=input_size, **tcn_kwargs)\n self.representation_size += tcn_kwargs[\"num_channels\"][-1]\n\n self.encoders = torch.nn.ModuleDict(encoders)\n\n # hoa predictor (first layer is a lazy linear layer)\n self.predictor = MLP(**predictor)\n\n # byol predictors\n byol_predictors = dict()\n for name, tcn_kwargs in encoder_kwargs.items():\n emb_dim = tcn_kwargs[\"num_channels\"][-1]\n byol_predictors[name] = nn.Sequential(\n nn.Linear(emb_dim, emb_dim * 4, bias=False),\n nn.BatchNorm1d(emb_dim * 4, eps=1e-5, momentum=0.1),\n nn.ReLU(inplace=True),\n nn.Linear(emb_dim * 4, emb_dim, bias=True),\n )\n self.byol_predictors = torch.nn.ModuleDict(byol_predictors)\n\n def forward(self, x):\n # input shape: (B: batch_size, L:sequence_length, N: num_feats)\n # forward through TCNs\n embs = OrderedDict()\n byol_preds = 
OrderedDict()\n for name, encoder in self.encoders.items():\n embs[name] = encoder(x) # (B, L, N)\n flattened_emb = embs[name].flatten(0, 1) # (B*L, N)\n pred_emb = self.byol_predictors[name](flattened_emb)\n byol_preds[name] = pred_emb.reshape(embs[name].shape)\n\n # concatenate embeddings\n h = torch.cat(list(embs.values()), dim=2) # (B, L, N)\n\n # concatenate input and embeddings\n hx = torch.cat([h, x], dim=2)\n # prediction\n hoa_pred = self.predictor(hx)\n return embs, hoa_pred, byol_preds\n\n def __repr__(self) -> str:\n args = [\n f\" {name}: {encoder.__class__.__name__}\"\n f\" (receptive field: {encoder.receptive_field},\"\n f\" feature dim: {encoder.feat_dim})\"\n for name, encoder in self.encoders.items()\n ]\n args.append(\n f\" predictor: {self.predictor.__class__.__name__}\"\n f\" (input size: {self.input_size},\"\n f\" output size: {self.predictor.out_dim})\"\n )\n return \"{}([\\n{}\\n])\".format(self.__class__.__name__, \",\\n\".join(args))" }, { "identifier": "HoALoss", "path": "bams/hoa_loss.py", "snippet": "class HoALoss(nn.Module):\n def __init__(self, hoa_bins=32, skip_frames=60):\n super().__init__()\n\n self.hoa_bins = hoa_bins\n self.skip_frames = skip_frames\n\n def forward(self, target, pred, ignore_weights=None):\n r\"\"\"\n target: (B, L, N)\n pred: (B, L, N)\n ignore_weights: (B, L)\"\"\"\n n = target.size(2)\n\n # reshape\n target = target.reshape(-1, self.hoa_bins)\n pred = pred.reshape(-1, self.hoa_bins)\n \n # make each histogram sum to 1\n pred = torch.softmax(pred, dim=1)\n\n # compute EMD using Mallow's distance\n loss = earth_mover_distance(target, pred)\n\n # ignore first `self.skip_frames` frames\n ignore_weights[:, :self.skip_frames] = 1.0\n ignore_weights = ignore_weights.unsqueeze(2).repeat((1, 1, n, 1))\n weights = 1 - ignore_weights.view(-1)\n loss = torch.sum(loss * weights) / torch.sum(weights)\n return loss" } ]
import os import numpy as np import argparse import torch import torch.nn.functional as F from datetime import datetime from torch import optim from torch.utils.data import DataLoader from torch.utils.tensorboard import SummaryWriter from tqdm import tqdm from bams.data import Dataset from bams.data.utils import diff, to_polar_coordinates, angle_clip from bams.models import BAMS from bams import HoALoss
5,464
skip_frames = 100 view_1_id = ( torch.randint(sequence_length - skip_frames, (batch_size,)) + skip_frames ) view_2_id = ( torch.randint(sequence_length - skip_frames, (batch_size,)) + skip_frames ) view_1 = byol_preds["long_term"][torch.arange(batch_size), view_1_id] view_2 = embs["long_term"][torch.arange(batch_size), view_2_id] byol_loss_long_term = ( 1 - F.cosine_similarity(view_1, view_2.clone().detach(), dim=-1).mean() ) # backprop loss = 5e2 * hoa_loss + 0.5 * byol_loss_short_term + 0.5 * byol_loss_long_term loss.backward() optimizer.step() step += 1 if step % log_every_step == 0: writer.add_scalar("train/hoa_loss", hoa_loss.item(), step) writer.add_scalar( "train/byol_loss_short_term", byol_loss_short_term.item(), step ) writer.add_scalar( "train/byol_loss_long_term", byol_loss_long_term.item(), step ) writer.add_scalar("train/total_loss", loss.item(), step) return step def main(): parser = argparse.ArgumentParser() parser.add_argument( "--job", default="train", const="train", nargs="?", choices=["train", "compute_representations"], help="select task", ) parser.add_argument("--data_root", type=str, default="./data/mabe") parser.add_argument("--cache_path", type=str, default="./data/mabe/mouse_triplet") parser.add_argument("--hoa_bins", type=int, default=32) parser.add_argument("--batch_size", type=int, default=32) parser.add_argument("--num_workers", type=int, default=16) parser.add_argument("--epochs", type=int, default=500) parser.add_argument("--lr", type=float, default=1e-3) parser.add_argument("--weight_decay", type=float, default=4e-5) parser.add_argument("--log_every_step", type=int, default=50) parser.add_argument("--ckpt_path", type=str, default=None) args = parser.parse_args() if args.job == "train": train(args) elif args.job == "compute_representations": compute_representations(args) def train(args): device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # dataset if not Dataset.cache_is_available(args.cache_path, args.hoa_bins): print("Processing data...") keypoints, split_mask, batch = load_mice_triplet(args.data_root) input_feats, target_feats, ignore_frames = mouse_feature_extractor(keypoints) else: print("No need to process data") input_feats = target_feats = ignore_frames = None dataset = Dataset( input_feats=input_feats, target_feats=target_feats, ignore_frames=ignore_frames, cache_path=args.cache_path, cache=True, hoa_bins=args.hoa_bins, hoa_window=30, ) print("Number of sequences:", len(dataset)) # prepare dataloaders train_loader = DataLoader( dataset, batch_size=args.batch_size, shuffle=True, drop_last=True, num_workers=args.num_workers, pin_memory=True, ) # build model model = BAMS( input_size=dataset.input_size, short_term=dict(num_channels=(64, 64, 32, 32), kernel_size=3), long_term=dict(num_channels=(64, 64, 64, 32, 32), kernel_size=3, dilation=4), predictor=dict( hidden_layers=(-1, 256, 512, 512, dataset.target_size * args.hoa_bins), ), # frame rate = 30, 6 steps = 200ms ).to(device) model_name = f"bams-mouse-triplet-{datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}" writer = SummaryWriter("runs/" + model_name) main_params = [p for name, p in model.named_parameters() if "byol" not in name] byol_params = list(model.byol_predictors.parameters()) optimizer = optim.AdamW( [{"params": main_params}, {"params": byol_params, "lr": args.lr * 10}], lr=args.lr, weight_decay=args.weight_decay, ) scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[200], gamma=0.1)
############# # Load data # ############# def load_mice_triplet(path): # load raw train data (with annotations for 2 tasks) data_train = np.load( os.path.join(path, "mouse_triplet_train.npy"), allow_pickle=True ).item() sequence_ids_train, sequence_data_train = zip(*data_train["sequences"].items()) keypoints_train = np.stack([data["keypoints"] for data in sequence_data_train]) # load submission data (no annoations) data_submission = np.load( os.path.join(path, "mouse_triplet_test.npy"), allow_pickle=True ).item() sequence_ids_submission, sequence_data_submission = zip( *data_submission["sequences"].items() ) keypoints_submission = np.stack( [data["keypoints"] for data in sequence_data_submission] ) # concatenate train and submission data sequence_ids = np.concatenate([sequence_ids_train, sequence_ids_submission], axis=0) keypoints = np.concatenate([keypoints_train, keypoints_submission], axis=0) split_mask = np.ones(len(sequence_ids), dtype=bool) split_mask[-len(sequence_ids_submission) :] = False # treat each mouse independently, keep track of which video each mouse came from num_samples, sequence_length, num_mice, num_keypoints, _ = keypoints.shape keypoints = keypoints.transpose((0, 2, 1, 3, 4)) keypoints = keypoints.reshape((-1, sequence_length, num_keypoints, 2)) batch = np.repeat(np.arange(num_samples), num_mice) return keypoints, split_mask, batch ################ # Process data # ################ def mouse_feature_extractor(keypoints, noise_thresh=3e-3): # compute state features # body part 1: head, keypoints 0, 1, 2, 3 head_center = keypoints[..., 3, :] head_orientation = np.arctan2( keypoints[..., 0, 1] - keypoints[..., 3, 1], keypoints[..., 0, 0] - keypoints[..., 3, 0], ) # body part 2: forepaws, keypoints 3, 4, 5 # use keypoint 3 as center left_forepaw = keypoints[..., 4, :] - keypoints[..., 3, :] right_forepaw = keypoints[..., 5, :] - keypoints[..., 3, :] left_forepaw_r, left_forepaw_theta = to_polar_coordinates(left_forepaw) right_forepaw_r, right_forepaw_theta = to_polar_coordinates(right_forepaw) forepaws_theta = angle_clip(right_forepaw_theta - left_forepaw_theta) # connection body parts 2-3 spine = keypoints[..., 6, :] - keypoints[..., 3, :] spine_r, spine_theta = to_polar_coordinates(spine) # body part 3: bottom, keypoints 6, 7, 8, 9 bottom_center = keypoints[..., 6, :] # center bottom = keypoints[..., 7:, :] - bottom_center[..., np.newaxis, :] bottom_orientation = np.arctan2( keypoints[..., 6, 1] - keypoints[..., 9, 1], keypoints[..., 6, 0] - keypoints[..., 9, 0], ) bottom_rotation = np.array( [ [np.cos(-bottom_orientation), -np.sin(-bottom_orientation)], [np.sin(-bottom_orientation), np.cos(-bottom_orientation)], ] ) # rotate bottom = np.einsum("ijkp,lpij->ijkl", bottom, bottom_rotation) left_hindpaw_r, left_hindpaw_theta = to_polar_coordinates(bottom[..., 0, :]) left_hindpaw_theta = left_hindpaw_theta right_hindpaw_r, right_hindpaw_theta = to_polar_coordinates(bottom[..., 1, :]) right_hindpaw_theta = right_hindpaw_theta center_to_tail_r, _ = to_polar_coordinates(bottom[..., 2, :]) _, tail_theta_1 = to_polar_coordinates(bottom[..., 3, :] - bottom[..., 2, :]) tail_theta_1 = tail_theta_1 _, tail_theta_2 = to_polar_coordinates(bottom[..., 4, :] - bottom[..., 3, :]) tail_theta_2 = tail_theta_2 # compute action features ### body part 1: head head_vx = diff(head_center[..., 0]) head_vy = diff(head_center[..., 0]) head_vr, head_vtheta = to_polar_coordinates(np.stack([head_vx, head_vy], axis=-1)) head_vtheta[head_vr < noise_thresh] = 0.0 head_vr[head_vr < noise_thresh] = 0.0 
head_dvtheta = angle_clip(diff(head_vtheta)) # orientation head_orientation_dtheta = angle_clip(diff(head_orientation)) ### body part 2: forepaws # left forepaw left_forepaw_dr = diff(left_forepaw_r) left_forepaw_dtheta = angle_clip(diff(left_forepaw_theta)) # right forepaw right_forepaw_dr = diff(left_forepaw_r) right_forepaw_dtheta = angle_clip(diff(right_forepaw_theta)) # angle between forepaws forepaws_dtheta = angle_clip(diff(forepaws_theta)) # body part 3: bottom # velocity bottom_vx = diff(bottom_center[..., 0]) bottom_vy = diff(bottom_center[..., 1]) bottom_vr, bottom_vtheta = to_polar_coordinates( np.stack([bottom_vx, bottom_vy], axis=-1) ) bottom_vtheta[bottom_vr < noise_thresh] = 0.0 bottom_vr[bottom_vr < noise_thresh] = 0.0 bottom_dvtheta = angle_clip(diff(bottom_vtheta)) # orientation bottom_orientation_dtheta = angle_clip(diff(bottom_orientation)) # left hindpaw left_hindpaw_dr = diff(left_hindpaw_r) left_hindpaw_dtheta = angle_clip(diff(left_hindpaw_theta)) # right hindpaw right_hindpaw_dr = diff(right_hindpaw_r) right_hindpaw_dtheta = angle_clip(diff(right_hindpaw_theta)) # body part 4: tail tail_dtheta_1 = angle_clip(diff(tail_theta_1)) tail_dtheta_2 = angle_clip(diff(tail_theta_2)) # connections between body parts center_to_tail_dr = diff(center_to_tail_r) spine_dr = diff(spine_r) spine_dtheta = angle_clip(diff(spine_theta)) ignore_frames = np.any(keypoints[..., 0] == 0, axis=-1) ignore_frames[:, 1:] = np.logical_or(ignore_frames[:, 1:], ignore_frames[:, :-1]) input_features = np.stack( [ head_center[..., 0], head_center[..., 1], np.cos(head_orientation), np.sin(head_orientation), left_forepaw_r, np.cos(left_forepaw_theta), np.sin(left_forepaw_theta), right_forepaw_r, np.cos(right_forepaw_theta), np.sin(right_forepaw_theta), np.cos(forepaws_theta), np.sin(forepaws_theta), bottom_center[..., 0], bottom_center[..., 1], np.cos(bottom_orientation), np.sin(bottom_orientation), left_hindpaw_r, np.cos(left_hindpaw_theta), np.sin(left_hindpaw_theta), right_hindpaw_r, np.cos(right_hindpaw_theta), np.sin(right_hindpaw_theta), center_to_tail_r, np.cos(tail_theta_1), np.sin(tail_theta_1), np.cos(tail_theta_2), np.sin(tail_theta_2), spine_r, np.cos(spine_theta), np.sin(spine_theta), head_vr, np.cos(head_vtheta), np.sin(head_vtheta), np.cos(head_dvtheta), np.sin(head_dvtheta), np.cos(head_orientation_dtheta), np.sin(head_orientation_dtheta), left_forepaw_dr, np.cos(left_forepaw_dtheta), np.sin(left_forepaw_dtheta), right_forepaw_dr, np.cos(right_forepaw_dtheta), np.sin(right_forepaw_dtheta), np.cos(forepaws_dtheta), np.sin(forepaws_dtheta), bottom_vr, np.cos(bottom_vtheta), np.sin(bottom_vtheta), np.cos(bottom_dvtheta), np.sin(bottom_dvtheta), np.cos(bottom_orientation_dtheta), np.sin(bottom_orientation_dtheta), left_hindpaw_dr, np.cos(left_hindpaw_dtheta), np.sin(left_hindpaw_dtheta), right_hindpaw_dr, np.cos(right_hindpaw_dtheta), np.sin(right_hindpaw_dtheta), np.cos(tail_dtheta_1), np.sin(tail_dtheta_1), np.cos(tail_dtheta_2), np.sin(tail_dtheta_2), center_to_tail_dr, spine_dr, np.cos(spine_dtheta), np.sin(spine_dtheta), ignore_frames, ], axis=-1, ) target_feats = np.stack( [ head_vr, head_vtheta, head_dvtheta, head_orientation_dtheta, bottom_vr, bottom_vtheta, bottom_dvtheta, bottom_orientation_dtheta, spine_dr, ], axis=-1, ) return input_features, target_feats, ignore_frames ################# # Training loop # ################# def train_loop( model, device, loader, optimizer, criterion, writer, step, log_every_step ): model.train() for data in tqdm(loader, position=1, 
leave=False): # todo convert to float input = data["input"].float().to(device) # (B, N, L) target = data["target_hist"].float().to(device) ignore_weights = data["ignore_weights"].to(device) # forward pass optimizer.zero_grad() embs, hoa_pred, byol_preds = model(input) # prediction task hoa_loss = criterion(target, hoa_pred, ignore_weights) # contrastive loss: short term batch_size, sequence_length, emb_dim = embs["short_term"].size() skip_frames, delta = 60, 5 view_1_id = ( torch.randint(sequence_length - skip_frames - delta, (batch_size,)) + skip_frames ) view_2_id = torch.randint(delta + 1, (batch_size,)) + view_1_id view_2_id = torch.clip(view_2_id, 0, sequence_length) view_1 = byol_preds["short_term"][torch.arange(batch_size), view_1_id] view_2 = embs["short_term"][torch.arange(batch_size), view_2_id] byol_loss_short_term = ( 1 - F.cosine_similarity(view_1, view_2.clone().detach(), dim=-1).mean() ) # contrastive loss: long term batch_size, sequence_length, emb_dim = embs["long_term"].size() skip_frames = 100 view_1_id = ( torch.randint(sequence_length - skip_frames, (batch_size,)) + skip_frames ) view_2_id = ( torch.randint(sequence_length - skip_frames, (batch_size,)) + skip_frames ) view_1 = byol_preds["long_term"][torch.arange(batch_size), view_1_id] view_2 = embs["long_term"][torch.arange(batch_size), view_2_id] byol_loss_long_term = ( 1 - F.cosine_similarity(view_1, view_2.clone().detach(), dim=-1).mean() ) # backprop loss = 5e2 * hoa_loss + 0.5 * byol_loss_short_term + 0.5 * byol_loss_long_term loss.backward() optimizer.step() step += 1 if step % log_every_step == 0: writer.add_scalar("train/hoa_loss", hoa_loss.item(), step) writer.add_scalar( "train/byol_loss_short_term", byol_loss_short_term.item(), step ) writer.add_scalar( "train/byol_loss_long_term", byol_loss_long_term.item(), step ) writer.add_scalar("train/total_loss", loss.item(), step) return step def main(): parser = argparse.ArgumentParser() parser.add_argument( "--job", default="train", const="train", nargs="?", choices=["train", "compute_representations"], help="select task", ) parser.add_argument("--data_root", type=str, default="./data/mabe") parser.add_argument("--cache_path", type=str, default="./data/mabe/mouse_triplet") parser.add_argument("--hoa_bins", type=int, default=32) parser.add_argument("--batch_size", type=int, default=32) parser.add_argument("--num_workers", type=int, default=16) parser.add_argument("--epochs", type=int, default=500) parser.add_argument("--lr", type=float, default=1e-3) parser.add_argument("--weight_decay", type=float, default=4e-5) parser.add_argument("--log_every_step", type=int, default=50) parser.add_argument("--ckpt_path", type=str, default=None) args = parser.parse_args() if args.job == "train": train(args) elif args.job == "compute_representations": compute_representations(args) def train(args): device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # dataset if not Dataset.cache_is_available(args.cache_path, args.hoa_bins): print("Processing data...") keypoints, split_mask, batch = load_mice_triplet(args.data_root) input_feats, target_feats, ignore_frames = mouse_feature_extractor(keypoints) else: print("No need to process data") input_feats = target_feats = ignore_frames = None dataset = Dataset( input_feats=input_feats, target_feats=target_feats, ignore_frames=ignore_frames, cache_path=args.cache_path, cache=True, hoa_bins=args.hoa_bins, hoa_window=30, ) print("Number of sequences:", len(dataset)) # prepare dataloaders train_loader = DataLoader( dataset, 
batch_size=args.batch_size, shuffle=True, drop_last=True, num_workers=args.num_workers, pin_memory=True, ) # build model model = BAMS( input_size=dataset.input_size, short_term=dict(num_channels=(64, 64, 32, 32), kernel_size=3), long_term=dict(num_channels=(64, 64, 64, 32, 32), kernel_size=3, dilation=4), predictor=dict( hidden_layers=(-1, 256, 512, 512, dataset.target_size * args.hoa_bins), ), # frame rate = 30, 6 steps = 200ms ).to(device) model_name = f"bams-mouse-triplet-{datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}" writer = SummaryWriter("runs/" + model_name) main_params = [p for name, p in model.named_parameters() if "byol" not in name] byol_params = list(model.byol_predictors.parameters()) optimizer = optim.AdamW( [{"params": main_params}, {"params": byol_params, "lr": args.lr * 10}], lr=args.lr, weight_decay=args.weight_decay, ) scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[200], gamma=0.1)
criterion = HoALoss(hoa_bins=args.hoa_bins, skip_frames=60)
5
2023-12-05 16:26:57+00:00
8k
janmartchouk/vidgen
main.py
[ { "identifier": "ContentGetter", "path": "src/content_getter.py", "snippet": "class ContentGetter:\n def __init__(self, loglevel = logging.INFO):\n self.logger = setup_logger(__name__, loglevel, emoji='🌍')\n\n # Get a list of Reddit Posts from an RSS feed\n def from_subreddit(self, subreddit):\n if not subreddit in SUBREDDITS:\n self.logger.error(f\"{subreddit} is not configured\")\n exit(1)\n\n if SUBREDDITS[subreddit] == 'rss':\n return self.from_rss_subreddit(subreddit)\n elif SUBREDDITS[subreddit] == 'web':\n return self.from_web(subreddit)\n else:\n self.logger.error(f\"{subreddit} is not configured properly\")\n exit(1)\n\n def from_rss_subreddit(self, subreddit):\n data = feedparser.parse(f'https://reddit.com/r/{subreddit}/top.rss')\n posts = []\n failed_number = 0\n if data.entries:\n try:\n for entry in data.entries:\n paragraphs = BeautifulSoup(entry.content[0].value, 'html.parser').find_all('p')\n content = ''.join([p.get_text() for p in paragraphs])\n post_obj = Post(\n title=entry.title,\n author=entry.authors[0].name,\n subreddit=subreddit,\n content=content,\n crawl_date=time.time()\n )\n posts.append(post_obj)\n self.logger.debug(f\"RSS crawled the post {post_obj.short_hash}\")\n except Exception as e:\n failed_number += 1\n self.logger.debug(f\"Continuing, but encountered an error parsing RSS feed: {e}\")\n self.logger.info(f\"RSS crawled {len(posts)} posts from {subreddit} ({failed_number} failed)\")\n return posts\n \n def from_web(self, subreddit):\n soup = BeautifulSoup(requests.get(f'https://reddit.com/r/{subreddit}/top').content, 'html.parser')\n posts = []\n failed_number = 0\n for post in soup.find_all('shreddit-post'):\n try:\n post_obj = Post(\n title=post.find('a', id=lambda x: x and 'post-title' in x).text,\n author=post.find('span', {'slot': 'authorName'}).text,\n subreddit=subreddit,\n content=post.find('div', id=lambda x: x and 'post-rtjson-content' in x).text,\n crawl_date=time.time()\n )\n posts.append(post_obj)\n self.logger.debug(f\"Web crawled the post {post_obj.short_hash}\")\n except Exception as e:\n failed_number += 1\n self.logger.debug(f\"Continuing, but encountered an error parsing web feed: {e}\")\n self.logger.info(f\"Web crawled {len(posts)} posts from {subreddit} ({failed_number} failed)\")\n return posts" }, { "identifier": "SUBREDDITS", "path": "config/dicts.py", "snippet": "SUBREDDITS = {\n 'tifu': 'rss',\n 'confession': 'rss',\n 'relationship_advice': 'web',\n 'amitheasshole': 'rss'\n}" }, { "identifier": "VIDEO_DIR", "path": "config/structure.py", "snippet": "VIDEO_DIR = 'data/video/done'" }, { "identifier": "DB", "path": "src/db.py", "snippet": "class DB:\n def __init__(self, loglevel = logging.INFO):\n self.logger = setup_logger(__name__, loglevel, emoji='📚')\n\n # if db path invalid, exit\n if not structure.DB_PATH:\n self.logger.error(\"No DB_PATH configured in config\")\n exit(1)\n elif not os.path.isfile(structure.DB_PATH):\n self.logger.info(\"DB_PATH does not exist, creating\")\n open(structure.DB_PATH, 'w').close()\n\n self.conn = sqlite3.connect(structure.DB_PATH)\n self.c = self.conn.cursor()\n\n self.c.execute(\"CREATE TABLE IF NOT EXISTS posts (hash text, data blob)\")\n\n self.logger.info(\"Connected to DB\")\n \n def __del__(self): \n self.close()\n\n def close(self):\n if self.conn:\n self.conn.commit()\n self.conn.close()\n self.logger.info(\"DB connection closed\")\n\n # Fetch Post by hash from db\n def get_post_by_hash(self, hash):\n \"\"\"\n Fetch a Post object from the database by its hash.\n\n :param hash: The 
hash of the post to fetch.\n :return: The Post object if found, None otherwise.\n \"\"\"\n\n self.logger.debug(f\"Fetching post with hash {hash}\")\n\n self.c.execute(\"SELECT * FROM posts WHERE hash=?\", (hash,))\n result = self.c.fetchone()\n\n if result:\n hash, data = result\n self.logger.debug(f\"Found post with hash {hash}\")\n return pickle.loads(data)\n else:\n self.logger.debug(f\"Could not find post with hash {hash}\")\n return None\n\n # Insert a Post object into DB\n def insert_post(self, post: Post):\n \"\"\"\n Insert a Post object into the database.\n\n :param post: The Post object to insert.\n \"\"\"\n\n self.logger.debug(f\"Inserting post with hash {post.hash}\")\n\n self.c.execute(\"INSERT INTO posts VALUES (?, ?)\", (post.hash, pickle.dumps(post)))\n self.conn.commit()\n\n self.logger.debug(f\"Successfully inserted post with hash {post.hash}\")\n\n # Update a Post object in DB\n def update_post(self, post: Post):\n \"\"\"\n Update a Post object in the database.\n\n :param post: The Post object to update.\n \"\"\"\n\n self.logger.debug(f\"Updating post with hash {post.hash}\")\n\n self.c.execute(\"UPDATE posts SET data=? WHERE hash=?\", (pickle.dumps(post), post.hash))\n self.conn.commit()\n\n self.logger.debug(f\"Successfully updated post with hash {post.hash}\")\n\n # Delete a Post object from DB\n def delete_post(self, post: Post):\n \"\"\"\n Delete a Post object from the database.\n\n :param post: The Post object to delete.\n \"\"\"\n\n self.logger.debug(f\"Deleting post with hash {post.hash}\")\n\n self.c.execute(\"DELETE FROM posts WHERE hash=?\", (post.hash,))\n self.conn.commit()\n\n self.logger.debug(f\"Successfully deleted post with hash {post.hash}\")\n\n # Update a Post in DB\n def update_post(self, post: Post):\n \"\"\"\n Update a Post object in the database.\n\n :param post: The Post object to update.\n :param loglevel: The log level for logging messages (default: logging.INFO).\n \"\"\"\n\n self.logger.debug(f\"Updating post with hash {post.hash}\")\n\n self.c.execute(\"UPDATE posts SET data=? 
WHERE hash=?\", (pickle.dumps(post), post.hash))\n self.conn.commit()\n\n self.logger.debug(f\"Successfully updated post with hash {post.hash}\")\n\n def get_all_posts(self):\n \"\"\"\n Get all Post objects from the database.\n\n :return: A list of Post objects.\n \"\"\"\n\n self.logger.debug(\"Fetching all posts\")\n\n self.c.execute(\"SELECT * FROM posts\")\n result = self.c.fetchall()\n\n posts = []\n\n for hash, data in result:\n posts.append(pickle.loads(data))\n\n self.logger.debug(f\"Found {len(posts)} posts\")\n\n return posts" }, { "identifier": "setup_logger", "path": "utils/logger.py", "snippet": "def setup_logger(name, level=logging.INFO, emoji='⚙️'):\n \"\"\"To setup as many loggers as you want\"\"\"\n\n # Create handlers\n c_handler = logging.StreamHandler()\n c_handler.setLevel(level)\n\n # Create formatters and add it to handlers\n c_format = ColoredFormatter(emoji + ' | %(name)s | %(message)s')\n c_handler.setFormatter(c_format)\n\n # Add handlers to the logger\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(c_handler)\n\n return logger" }, { "identifier": "AudioGenerator", "path": "src/audio_generator.py", "snippet": "class AudioGenerator:\n def __init__(self, loglevel = logging.INFO):\n self.logger = setup_logger(__name__, loglevel, emoji='🎵')\n self.output_dir = AUDIO_DIR\n\n def from_post(self, post):\n \"\"\"\n Generate audio from a post.\n \n Args:\n post (Post): The post content to generate audio from.\n \n Returns:\n bool: True if audio generation is successful, False otherwise.\n \"\"\"\n\n voice = random.choice(TIKTOK_VOICES)\n texts = [post.title] + split_text_into_chunks(post.content)\n\n segments = AudioSegment.empty()\n\n with tempfile.TemporaryDirectory() as tmpdirname:\n try:\n for i, t in enumerate(texts):\n filename = os.path.join(tmpdirname, f\"{i}out.mp3\")\n\n sys.stdout = open(os.devnull, 'w') # block tiktok_tts print() spam\n tiktok_tts(t, voice, filename, play_sound=False)\n sys.stdout = sys.__stdout__ # restore printing\n\n segments += AudioSegment.from_file(filename, format='mp3')\n\n audio_path = os.path.join(self.output_dir, f'{post.hash}.mp3')\n segments.export(audio_path)\n self.logger.debug(f\"Generated audio for post {post.short_hash}\")\n return True\n except Exception as e:\n self.logger.error(f\"Failed to generate audio for post {post.short_hash}: {e}\")\n return False" }, { "identifier": "Subtitler", "path": "src/subtitler.py", "snippet": "class Subtitler:\n def __init__(self, loglevel = logging.INFO):\n self.logger = setup_logger(__name__, loglevel, emoji='📝')\n self.model = whisper.load_model('small.en')\n self.writer = whisper.utils.WriteSRT(SUBTITLE_DIR)\n\n def from_post(self, post):\n if post.audio and not post.subtitles:\n return self.from_hash(post.hash)\n elif not post.audio:\n self.logger.debug(f\"Skipping subtitle generation for post {shorten_hash(post.hash)} because it has no audio\")\n return False\n elif post.subtitles:\n self.logger.debug(f\"Skipping subtitle generation for post {shorten_hash(post.hash)} because it already has subtitles\")\n return True\n \n def from_hash(self, hash):\n \"\"\"\n Generate subtitles from a post hash.\n\n Args:\n hash (str): The hash of the post to generate subtitles from.\n \n Returns:\n bool: True if subtitle generation is successful, False otherwise.\n \"\"\"\n try:\n result = self.model.transcribe(f'{AUDIO_DIR}/{hash}.mp3')\n self.writer(result, f'{SUBTITLE_DIR}/{hash}.srt')\n self.logger.debug(f\"Generated subtitles for post {shorten_hash(hash)}\")\n 
return True\n except Exception as e:\n self.logger.error(f\"Failed to generate subtitles for post {shorten_hash(hash)}: {e}\")\n return False" }, { "identifier": "Composer", "path": "src/composer.py", "snippet": "class Composer:\n def __init__(self, loglevel = logging.INFO):\n self.logger = setup_logger(__name__, loglevel, emoji='🎥')\n\n def from_post(self, post):\n if post.video:\n self.logger.debug(f\"Skipping video generation for post {post.short_hash} because it already has a video\")\n return True\n elif post.audio and post.subtitles:\n return self.from_hash(post.hash)\n else:\n self.logger.debug(f\"Skipping video generation for post {post.short_hash} because it has no audio or subtitles\")\n return False\n\n def from_hash(self, hash):\n \"\"\"\n Generate a video from a post hash.\n\n Args:\n hash (str): The hash of the post to generate a video from.\n \n Returns:\n bool: True if video generation is successful, False otherwise.\n \"\"\"\n try:\n ffmpeg_loglevel = 'info' if self.logger.getEffectiveLevel() == logging.DEBUG else 'error'\n\n audio_path = f'{AUDIO_DIR}/{hash}.mp3'\n subtitle_path = f'{SUBTITLE_DIR}/{hash}.srt'\n background_content_dir = os.path.join(BACKGROUNDS_DIR, random.choice([dir for dir in os.listdir(BACKGROUNDS_DIR) if os.path.isdir(os.path.join(BACKGROUNDS_DIR, dir))]))\n video_path = os.path.join(background_content_dir, random.choice([f for f in os.listdir(background_content_dir) if f.endswith('.mp4')]))\n\n audio_duration = get_duration(audio_path)\n video_duration = get_duration(video_path)\n\n # Calculate the maximum start time to ensure the remaining duration is sufficient for the voice-over\n max_start_time = int(video_duration - audio_duration)\n\n # Use ffmpeg to overlay the audio onto the video starting from a random time within the limits\n start_time = random.randint(0, max_start_time)\n composed_output_file = os.path.join(VIDEO_DIR, f'{hash}_composed.mp4')\n # Use Multithreading to speed up the process\n cmd = ['ffmpeg', \n '-loglevel', ffmpeg_loglevel,\n '-threads', 'auto',\n '-hwaccel', 'cuda', #CUDA processing on NVIDA GPU\n '-i', video_path, \n '-i', audio_path, \n '-filter_complex', f\"[0:v]trim=start={start_time}:duration={audio_duration},setpts=PTS-STARTPTS[v0];[1:a]atrim=start=0:duration={audio_duration},asetpts=PTS-STARTPTS[a0]\", \n '-map', \"[v0]\", \n '-map', \"[a0]\", \n '-c:v', 'h264_nvenc', \n '-c:a', 'aac', \n '-strict', 'experimental', \n composed_output_file\n ]\n subprocess.run(cmd)\n\n self.logger.debug(f\"Composed AV - {shorten_hash(hash)}\")\n\n # Burn subtitles onto the composed video\n composed_subtitled_output_file = os.path.join(VIDEO_DIR, f\"{hash}_composed_subtitled.mp4\")\n cmd = ['ffmpeg', \n '-loglevel', ffmpeg_loglevel,\n '-threads', 'auto',\n '-hwaccel', 'cuda',\n '-i', composed_output_file, \n '-vf', f\"subtitles={subtitle_path}:force_style='Fontfile={FONT['PATH']},Fontname={FONT['NAME']},Fontsize={str(FONT['SIZE'])},MarginV=100,Alignment=6,PrimaryColor=&H00FFFFFF,OutlineColor=&H00FFFFFF'\", \n '-c:v', 'h264_nvenc',\n '-c:a', 'copy', \n composed_subtitled_output_file\n ]\n subprocess.run(cmd)\n\n self.logger.debug(f\"Burned subtitles - {shorten_hash(hash)}\")\n\n os.remove(composed_output_file)\n\n # Split into 60 (50 for safety) Second parts for shorts\n parts_dir = os.path.join(VIDEO_DIR, f\"{hash}_parts\")\n if not os.path.exists(parts_dir):\n os.mkdir(parts_dir)\n composed_subtitled_output_part_file = os.path.join(parts_dir, f'%03d.mp4')\n cmd = ['ffmpeg', \n '-loglevel', ffmpeg_loglevel,\n '-hwaccel', 'cuda',\n 
'-threads', 'auto',\n '-i', composed_subtitled_output_file, \n '-c', 'copy', \n '-f', 'segment', \n '-segment_time', '50', \n '-reset_timestamps', '1', \n composed_subtitled_output_part_file]\n subprocess.run(cmd)\n\n except Exception as e:\n self.logger.error(f\"Failed to generate video for post {shorten_hash(hash)}: {e}\")\n return False\n\n self.logger.debug(f\"Cut into parts - {shorten_hash(hash)}\")\n return True" }, { "identifier": "shorten_string", "path": "utils/text.py", "snippet": "def shorten_string(input_string, max_length=20):\n \"\"\"\n Shortens a given input string to a maximum length, appending '...' if necessary.\n\n Args:\n input_string (str): The input string to be shortened.\n max_length (int, optional): The maximum length of the shortened string. Defaults to 20.\n\n Returns:\n str: The shortened string.\n\n Example:\n >>> shorten_string(\"This is a sentence.\", max_length=10)\n 'This is a...'\n \"\"\"\n if len(input_string) <= max_length:\n return input_string\n else:\n return input_string[:max_length-3] + '...'" } ]
import logging import time import argparse import concurrent.futures import json import os from tqdm import tqdm from tqdm.contrib.logging import logging_redirect_tqdm from utils.youtube_uploader import YouTubeUploader from src.content_getter import ContentGetter from config.dicts import SUBREDDITS from config.structure import VIDEO_DIR from src.db import DB from utils.logger import setup_logger from src.audio_generator import AudioGenerator from src.subtitler import Subtitler from src.composer import Composer from utils.text import shorten_string
4,838
def update_db(logger, db: DB): """ Update the DB with new Posts from Reddit. """ start = time.time() logger.info("Updating DB") cg = ContentGetter(loglevel=logging.INFO) new_insertions = 0 with logging_redirect_tqdm(loggers = [logger, cg.logger, db.logger]): for subreddit in tqdm(SUBREDDITS, desc="Subreddits", leave=False): for post in tqdm(cg.from_subreddit(subreddit), desc="Posts", leave=False): if not db.get_post_by_hash(post.hash): db.insert_post(post) new_insertions += 1 if args.quick and new_insertions >= args.quick_limit: logger.debug(f"Quick mode: Stopping after {new_insertions} new insertions") break if args.quick and new_insertions >= args.quick_limit: break end = time.time() logger.info(f"DB Update complete. Inserted {new_insertions} new Posts. Finished in {end - start} seconds") def generate_audio(logger, db: DB, num_threads=16): """ Generate audio from Posts in the DB using multiple threads. """ start = time.time() logger.info("Generating audio") ag = AudioGenerator(loglevel=logging.INFO) failed_number = 0 successes = 0 all_posts = db.get_all_posts() if args.quick: all_posts = all_posts[:args.quick_limit] # only work on quick_limit posts in quick mode num_posts=len(all_posts) bar = tqdm(total=num_posts, desc="Audios", leave=False) with concurrent.futures.ThreadPoolExecutor(max_workers=num_threads) as executor, logging_redirect_tqdm(loggers=[logger, ag.logger, db.logger]): future_to_post = {executor.submit(process_individual_post, post, ag, post.audio): post for post in all_posts} for future in concurrent.futures.as_completed(future_to_post): post = future_to_post[future] bar.set_postfix_str(post.short_hash) #update progressbar try: result = future.result() if result: post.audio = True db.update_post(post) #TODO successes += 1 if args.quick and successes >= args.quick_limit: logger.debug(f"Quick mode: Stopping after {successes} successes") break else: failed_number += 1 logger.debug(f"Failed to generate audio for post {post.short_hash} -- Deleting from DB") db.delete_post(post) #TODO except Exception as exc: logger.error(f"Error processing post {post.short_hash}: {exc}") finally: bar.update(1) #update progressbar end = time.time() bar.close() logger.info(f"Generated audio for {successes} Posts ({failed_number} failed). Finished in {end - start} seconds ({(end - start) / successes} seconds per Post)") def process_individual_post(post, generator, property): if not property: if generator.from_post(post): return True else: return False return True def generate_subtitles(logger, db: DB): """ Generate subtitles from Posts in the DB. """ ### We cannot multithread this well since Subtitler uses a ### full machine learning model loaded into RAM in the background. ### For multiple threads, we would need to load it multiple times. bad idea. ### If you implement Subtitler() to, i.e., use a server such as the whisper API, ### then you can multithread this start = time.time() logger.info("Generating subtitles")
def update_db(logger, db: DB): """ Update the DB with new Posts from Reddit. """ start = time.time() logger.info("Updating DB") cg = ContentGetter(loglevel=logging.INFO) new_insertions = 0 with logging_redirect_tqdm(loggers = [logger, cg.logger, db.logger]): for subreddit in tqdm(SUBREDDITS, desc="Subreddits", leave=False): for post in tqdm(cg.from_subreddit(subreddit), desc="Posts", leave=False): if not db.get_post_by_hash(post.hash): db.insert_post(post) new_insertions += 1 if args.quick and new_insertions >= args.quick_limit: logger.debug(f"Quick mode: Stopping after {new_insertions} new insertions") break if args.quick and new_insertions >= args.quick_limit: break end = time.time() logger.info(f"DB Update complete. Inserted {new_insertions} new Posts. Finished in {end - start} seconds") def generate_audio(logger, db: DB, num_threads=16): """ Generate audio from Posts in the DB using multiple threads. """ start = time.time() logger.info("Generating audio") ag = AudioGenerator(loglevel=logging.INFO) failed_number = 0 successes = 0 all_posts = db.get_all_posts() if args.quick: all_posts = all_posts[:args.quick_limit] # only work on quick_limit posts in quick mode num_posts=len(all_posts) bar = tqdm(total=num_posts, desc="Audios", leave=False) with concurrent.futures.ThreadPoolExecutor(max_workers=num_threads) as executor, logging_redirect_tqdm(loggers=[logger, ag.logger, db.logger]): future_to_post = {executor.submit(process_individual_post, post, ag, post.audio): post for post in all_posts} for future in concurrent.futures.as_completed(future_to_post): post = future_to_post[future] bar.set_postfix_str(post.short_hash) #update progressbar try: result = future.result() if result: post.audio = True db.update_post(post) #TODO successes += 1 if args.quick and successes >= args.quick_limit: logger.debug(f"Quick mode: Stopping after {successes} successes") break else: failed_number += 1 logger.debug(f"Failed to generate audio for post {post.short_hash} -- Deleting from DB") db.delete_post(post) #TODO except Exception as exc: logger.error(f"Error processing post {post.short_hash}: {exc}") finally: bar.update(1) #update progressbar end = time.time() bar.close() logger.info(f"Generated audio for {successes} Posts ({failed_number} failed). Finished in {end - start} seconds ({(end - start) / successes} seconds per Post)") def process_individual_post(post, generator, property): if not property: if generator.from_post(post): return True else: return False return True def generate_subtitles(logger, db: DB): """ Generate subtitles from Posts in the DB. """ ### We cannot multithread this well since Subtitler uses a ### full machine learning model loaded into RAM in the background. ### For multiple threads, we would need to load it multiple times. bad idea. ### If you implement Subtitler() to, i.e., use a server such as the whisper API, ### then you can multithread this start = time.time() logger.info("Generating subtitles")
st = Subtitler(loglevel=logging.INFO)
6
2023-12-14 13:00:22+00:00
8k
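
The row above ends with the prediction target (the `next_line` string), the index of the gold context snippet, a creation timestamp, and a context-length bucket (`8k`). Below is a minimal sketch of how a row like this might be consumed for next-line completion evaluation. The field semantics are inferred from this sample rather than documented here: it is assumed that `cropped_code` stops immediately before `next_line` and that `gold_snippet_index` points into the `context` list. The repo and file names in the toy row are placeholders, not values from the dataset.

```python
# Minimal sketch (assumptions): a row is a plain dict with the fields shown above;
# "gold_snippet_index" is assumed to index into the "context" list, and "next_line"
# is assumed to be the single line a model should predict after "cropped_code".
from typing import Dict, Tuple


def build_prompt(row: Dict) -> Tuple[str, str]:
    """Assemble a next-line completion prompt and its target from one row."""
    # Pick the context snippet assumed to be the most relevant retrieved definition.
    gold = row["context"][row["gold_snippet_index"]]
    header = (
        f"# repo: {row['repo_name']}\n"
        f"# file: {row['file_path']}\n"
        f"# retrieved from {gold['path']} ({gold['identifier']}):\n"
        f"{gold['snippet']}\n\n"
    )
    # The imports plus the cropped file body form the code the model must continue.
    prompt = header + row["import_statement"] + "\n" + row["cropped_code"]
    return prompt, row["next_line"]


def exact_match(prediction: str, target: str) -> bool:
    """Whitespace-insensitive exact match on the predicted next line."""
    return prediction.strip() == target.strip()


if __name__ == "__main__":
    # Toy row using values visible in the sample above; the snippet and code strings
    # are shortened stand-ins, and the repo/file names are hypothetical.
    row = {
        "repo_name": "example/reddit-video-bot",   # placeholder, not from the dataset
        "file_path": "main.py",                    # placeholder, not from the dataset
        "context": [{"identifier": "Subtitler",
                     "path": "src/subtitler.py",
                     "snippet": "class Subtitler: ..."}],
        "gold_snippet_index": 0,                   # 6 in the real row; 0 fits this toy list
        "import_statement": "from src.subtitler import Subtitler",
        "cropped_code": 'logger.info("Generating subtitles")',
        "next_line": "st = Subtitler(loglevel=logging.INFO)",
    }
    prompt, target = build_prompt(row)
    print(exact_match("st = Subtitler(loglevel=logging.INFO)", target))  # True
```

A scorer built this way only checks the single gold line; whether the benchmark also credits semantically equivalent completions is not stated in the sample, so exact match is used here purely for illustration.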